In [ ]:
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
import os, shutil, subprocess, getpass, urllib.parse
GITHUB_USER = "avahuu"
REPO_NAME = "cal_school"
BRANCH = "main"
DRIVE_DIR = "/content/drive/MyDrive/github"
LOCAL_PATH = f"{DRIVE_DIR}/{REPO_NAME}"
# read token
print("Paste your GitHub Personal Access Token (PAT). It will NOT be saved:")
raw = getpass.getpass()
TOKEN = urllib.parse.quote(raw.strip(), safe="")
# clone (create the Drive folder first so the clone has somewhere to land)
os.makedirs(DRIVE_DIR, exist_ok=True)
if not os.path.exists(LOCAL_PATH):
    auth_url = f"https://{GITHUB_USER}:{TOKEN}@github.com/{GITHUB_USER}/{REPO_NAME}.git"
    print("Cloning to:", LOCAL_PATH)
    subprocess.run(["git","clone",auth_url,LOCAL_PATH], check=True, cwd=DRIVE_DIR)
else:
    print("Repo exists:", LOCAL_PATH)
# safety check (ty GPT): strip the token from the stored remote URL, then sync the branch
os.chdir(LOCAL_PATH)
subprocess.run(["git","remote","set-url","origin", f"https://github.com/{GITHUB_USER}/{REPO_NAME}.git"], check=True)
rc = subprocess.run(["git","checkout",BRANCH])
if rc.returncode != 0:
    subprocess.run(["git","checkout","-b",BRANCH,f"origin/{BRANCH}"], check=True)
subprocess.run(["git","pull","origin",BRANCH], check=False)
print("\n✅ Ready at:", LOCAL_PATH)
Mounted at /content/drive
Paste your GitHub Personal Access Token (PAT). It will NOT be saved:
··········
Repo exists: /content/drive/MyDrive/github/cal_school

✅ Ready at: /content/drive/MyDrive/github/cal_school
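To double-check the safety step above, here is a minimal sketch (using the same subprocess calls, run from inside the repo) that confirms the stored origin URL no longer embeds the PAT; the assertion message is just illustrative.
In [ ]:
# Verify the token was not persisted in .git/config after the remote reset above
import subprocess

origin_url = subprocess.check_output(
    ["git", "config", "--get", "remote.origin.url"], text=True
).strip()
print("origin:", origin_url)

# The tokenized clone URL contains "user:token@"; the reset URL should not.
assert "@" not in origin_url, "remote URL still embeds credentials"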
In [ ]:
import pandas as pd
In [ ]:
df = pd.read_csv('/content/drive/MyDrive/github/cal_school/sources/2021-22-crdc-data/SCH/School Characteristics.csv')
/tmp/ipython-input-2248053941.py:1: DtypeWarning: Columns (2,6) have mixed types. Specify dtype option on import or set low_memory=False.
df = pd.read_csv('/content/drive/MyDrive/github/cal_school/sources/2021-22-crdc-data/SCH/School Characteristics.csv')
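The DtypeWarning comes from pandas inferring column types chunk by chunk. A quick way to silence it, following the warning's own suggestion, is to let pandas scan the whole file before choosing dtypes (passing an explicit dtype would be the stricter alternative).
In [ ]:
# Optional: re-read with low_memory=False so pandas infers dtypes from the full file,
# avoiding the mixed-type warning without changing any downstream values.
df = pd.read_csv(
    '/content/drive/MyDrive/github/cal_school/sources/2021-22-crdc-data/SCH/School Characteristics.csv',
    low_memory=False,
)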
In [ ]:
df_filtered = df[
    (df['LEA_STATE_NAME'] == 'CALIFORNIA') &
    (df['SCH_STATUS_CHARTER'] == 'Yes')
]
columns_to_keep = ['LEAID', 'LEA_NAME', 'SCHID', 'SCH_NAME', 'COMBOKEY']
df_charter = df_filtered[columns_to_keep]
print(df_charter.head())
print(df_charter.shape)
LEAID LEA_NAME SCHID SCH_NAME \
5047 600011 Fort Sage Unified 12763 Mt. Lassen Charter
5276 600034 Windsor Unified 6983 Cali Calmecac Language Academy
5297 600036 Natomas Unified 11087 Westlake Charter
5299 600036 Natomas Unified 11735 Natomas Pacific Pathways Prep
5301 600036 Natomas Unified 12523 Natomas Pacific Pathways Prep Middle
COMBOKEY
5047 60001112763
5276 60003406983
5297 60003611087
5299 60003611735
5301 60003612523
(1271, 5)
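Before relying on the filter, it can help to confirm that SCH_STATUS_CHARTER really is coded as 'Yes'/'No' text in this file and to see how many California rows exist overall; a small sanity-check sketch:
In [ ]:
# Sanity check: how is the charter flag coded, and how many CA rows are there in total?
print(df['SCH_STATUS_CHARTER'].value_counts(dropna=False))
print((df['LEA_STATE_NAME'] == 'CALIFORNIA').sum(), "rows in California overall")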
In [ ]:
edge = pd.read_excel('/content/drive/MyDrive/github/cal_school/sources/EDGE_GEOCODE_PUBLICSCH_2122.xlsx', dtype=str)
# 1) Keep only California schools from the EDGE file
edge_ca = edge[edge['STATE'].str.strip().str.upper() == 'CA']
# 2) Normalize keys: drop leading zeros
edge_ca = edge_ca.copy()
edge_ca['NCESSCH_norm'] = edge_ca['NCESSCH'].str.strip().str.lstrip('0')
df_charter = df_charter.copy()
df_charter['COMBOKEY_norm'] = (
    df_charter['COMBOKEY'].astype(str).str.strip().str.lstrip('0')
)
# 3) Merge on COMBOKEY <-> NCESSCH
merged = df_charter.merge(
    edge_ca[['NCESSCH','NCESSCH_norm','NMCNTY','CITY','LOCALE','LAT','LON']],
    left_on='COMBOKEY_norm',
    right_on='NCESSCH_norm',
    how='left'
)
# 4) Keep columns
out_cols = ['LEAID','LEA_NAME','SCHID','SCH_NAME','CITY','COMBOKEY','NMCNTY','LOCALE','LAT','LON']
out = merged[out_cols]
print(f"Rows in df_charter: {len(df_charter)}")
print(f"Rows matched with geocodes: {out['LAT'].notna().sum()}")
print(out.head())
Rows in df_charter: 1271
Rows matched with geocodes: 1266
LEAID LEA_NAME SCHID SCH_NAME \
0 600011 Fort Sage Unified 12763 Mt. Lassen Charter
1 600034 Windsor Unified 6983 Cali Calmecac Language Academy
2 600036 Natomas Unified 11087 Westlake Charter
3 600036 Natomas Unified 11735 Natomas Pacific Pathways Prep
4 600036 Natomas Unified 12523 Natomas Pacific Pathways Prep Middle
CITY COMBOKEY NMCNTY LOCALE LAT LON
0 Herlong 60001112763 Lassen County 33 40.4211 -120.650932
1 Windsor 60003406983 Sonoma County 21 38.550242 -122.82712
2 Sacramento 60003611087 Sacramento County 11 38.67564 -121.526258
3 Sacramento 60003611735 Sacramento County 11 38.6551 -121.546082
4 Sacramento 60003612523 Sacramento County 11 38.6551 -121.546082
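Five schools (1271 − 1266) did not pick up coordinates in the merge. A short sketch to list them, so they can be checked against EDGE by hand:
In [ ]:
# Which charter schools failed to match a geocode record?
unmatched = out[out['LAT'].isna()]
print(unmatched[['LEAID', 'LEA_NAME', 'SCH_NAME', 'COMBOKEY']])
print(f"{len(unmatched)} rows without coordinates")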
In [ ]:
print(out['CITY'].astype(str).str.strip().str.casefold().value_counts())
CITY
los angeles 175
san jose 52
san diego 49
oakland 38
sacramento 32
...
beale afb 1
pittsburg 1
yerington 1
san rafael 1
freedom 1
Name: count, Length: 331, dtype: int64
In [ ]:
out_path = '/content/drive/MyDrive/github/cal_school/export/CA_charter_with_geo.csv'
out.to_csv(out_path, index=False)
print(f"Saved: {out_path}")
Saved: /content/drive/MyDrive/github/cal_school/export/CA_charter_with_geo.csv
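On a fresh clone where the export/ folder does not exist yet, the to_csv call above would fail. A hedged guard, repeating the same path, creates the directory first:
In [ ]:
import os

# Create the export directory on Drive if it is missing, then save the CSV.
out_path = '/content/drive/MyDrive/github/cal_school/export/CA_charter_with_geo.csv'
os.makedirs(os.path.dirname(out_path), exist_ok=True)
out.to_csv(out_path, index=False)
print(f"Saved: {out_path}")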
In [ ]:
%cd /content/drive/MyDrive/github/cal_school
!git config user.name "avahuu"
!git config user.email "xmhu312@gmail.com"
!git rm -r --cached -f sources/
!grep -qxF "sources/" .gitignore || echo "sources/" >> .gitignore
!git add -A
!git commit -m "chore: ignore sources/ and untrack it; update notebook and export"
/content/drive/MyDrive/github/cal_school
fatal: pathspec 'sources/' did not match any files
On branch main
Your branch is ahead of 'origin/main' by 1 commit.
  (use "git push" to publish your local commits)
nothing to commit, working tree clean
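The `fatal: pathspec 'sources/' did not match any files` message just means git was never tracking sources/, so there was nothing to remove from the index. A sketch that makes this step idempotent, in the same shell-magic style, checks the index first:
In [ ]:
# Only untrack sources/ if git is actually tracking files under it.
tracked = !git ls-files sources/
if tracked:
    !git rm -r --cached -f sources/
else:
    print("sources/ is not tracked; nothing to untrack")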
In [57]:
!jupyter nbconvert script.ipynb.ipynb --to html --output index.html
[NbConvertApp] WARNING | pattern 'myfile.ipynb' matched no files
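The warning above means the filename pattern did not match any notebook in the current directory, so nbconvert printed its usage text instead of producing index.html. A hedged pre-check lists what is actually there before converting; the script.ipynb.ipynb name is copied from the cell above and may itself be the typo.
In [ ]:
import glob, os

# See which notebooks actually exist in the repo before pointing nbconvert at one.
print(glob.glob('*.ipynb'))

nb = 'script.ipynb.ipynb'  # taken from the cell above; adjust if the doubled extension is the bug
if os.path.exists(nb):
    !jupyter nbconvert "$nb" --to html --output index.html
else:
    print(f"{nb} not found in {os.getcwd()}")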
In [56]:
import getpass, urllib.parse, subprocess, os
REPO_OWNER = "avahuu"
REPO_NAME = "cal_school"
BRANCH = subprocess.check_output(["git","rev-parse","--abbrev-ref","HEAD"]).decode().strip()
!git add -A
!git commit -m "update from Colab" || echo "no changes to commit"
pat = urllib.parse.quote(getpass.getpass("Paste PAT (not saved): ").strip(), safe="")
auth = f"https://{REPO_OWNER}:{pat}@github.com/{REPO_OWNER}/{REPO_NAME}.git"
!git remote set-url origin "$auth"
!git push origin $BRANCH
!git remote set-url origin "https://github.com/{REPO_OWNER}/{REPO_NAME}.git"
[main 4987366] update from Colab
 1 file changed, 1 insertion(+), 1 deletion(-)
Paste PAT (not saved): ··········
Enumerating objects: 5, done.
Counting objects: 100% (5/5), done.
Delta compression using up to 2 threads
Compressing objects: 100% (3/3), done.
Writing objects: 100% (3/3), 690 bytes | 86.00 KiB/s, done.
Total 3 (delta 2), reused 0 (delta 0), pack-reused 0
remote: Resolving deltas: 100% (2/2), completed with 2 local objects.
To https://github.com/avahuu/cal_school.git
   3d0ef11..4987366  main -> main
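The push cell briefly writes the tokenized URL into .git/config and then resets it. An alternative sketch, using the same auth and BRANCH variables from that cell, pushes straight to the authenticated URL so the PAT is never stored in the repo config at all:
In [ ]:
# Push directly to the tokenized URL; .git/config keeps only the clean origin URL.
# auth and BRANCH are defined in the previous cell.
!git push "$auth" $BRANCH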