#!/usr/bin/env python3
"""
Transform repo-style schema JSON into a Hugging Face–friendly "long list":
- Top-level keys like "002125_filename" are moved into each record's INFO:
    INFO.ID = "002125"
    INFO.FILENAME = "filename"
- Normalize tables and columns to consistent lists (not dicts).
- Ensure presence of standard fields with sensible defaults.
- Output as JSON Lines.

Usage:
    python convert_to_hf.py

Reads schemapile-perm.json.gz from the working directory, writes data.jsonl,
and loads the result back into a Hugging Face Dataset as a sanity check.
"""
import gzip
import json
from typing import Any, Dict, List

from datasets import Dataset
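
# Illustrative input -> output shape (a hypothetical record, not from the real dump):
#
#   {"002125_schema.sql": {"INFO": {"URL": "...", "LICENSE": "MIT"},
#                          "TABLES": {"users": {"COLUMNS": {"id": {"TYPE": "int"}}}}}}
#
# becomes one flat record per top-level key:
#
#   {"ID": "002125", "FILENAME": "schema.sql", "URL": "...", "LICENSE": "MIT",
#    "PERMISSIVE": false, "TABLES": [{"TABLE_NAME": "users", "COLUMNS": [...], ...}]}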
# -------- Defaults & helpers --------
DEFAULT_COLUMN = {
"TYPE": None,
"NULLABLE": True,
"UNIQUE": False,
"DEFAULT": None,
"CHECKS": [],
"IS_PRIMARY": False,
"IS_INDEX": False,
"VALUES": None,
}
DEFAULT_TABLE = {
"PRIMARY_KEYS": [],
"FOREIGN_KEYS": [],
"CHECKS": [],
"INDEXES": [],
}
def _normalize_index_like(x) -> List[str]:
"""
Normalize index-like fields (INDEXES, PRIMARY_KEYS) into a list[str].
- Single column indexes expressed as ["col"] or [["col"]] -> ["col"]
- Composite indexes like ["a","b"] or [["a","b"]] -> ["a,b"]
- Scalars pass through -> ["scalar"]
"""
if x is None:
return []
if not isinstance(x, list):
x = [x]
out: List[str] = []
for item in x:
if item is None:
continue
if isinstance(item, (list, tuple, set)):
flat = [_strify_scalar(v) for v in item if v is not None]
if len(flat) == 1:
out.append(flat[0])
elif len(flat) > 1:
out.append(",".join(flat))
else:
out.append(_strify_scalar(item))
return out
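# Illustrative (hypothetical inputs):
#   _normalize_index_like([["a", "b"], "c"]) -> ["a,b", "c"]
#   _normalize_index_like("id")              -> ["id"]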
def _strify_scalar(x):
if x is None:
return None
if isinstance(x, (dict, list, tuple, set)):
# stable textual form for complex values
return json.dumps(x, ensure_ascii=False)
return str(x)
def _strify_list(x):
if x is None:
return None
if not isinstance(x, list):
x = [x]
return [_strify_scalar(v) for v in x]
def split_id_filename(key: str):
"""Split '002125_filename.ext' into ('002125', 'filename.ext') if possible."""
if "_" in key:
id_part, filename = key.split("_", 1)
return id_part, filename
return None, key # no obvious ID; put the whole key as filename
def norm_list(x, *, default_empty_list=True):
if x is None:
return [] if default_empty_list else None
if isinstance(x, list):
return x
# tolerate single item -> list
return [x]
def coalesce(a, b):
"""Merge dictionaries with 'a' taking precedence where keys overlap."""
out = dict(b or {})
out.update(a or {})
return out
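# Illustrative: coalesce({"x": 1}, {"x": 0, "y": 2}) -> {"x": 1, "y": 2}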
def normalize_column(col_name: str, col_payload: Dict[str, Any]) -> Dict[str, Any]:
    # Normalize keys; tolerate quoted variants of the VALUES key (e.g. "VALUES’")
    col_payload = col_payload or {}
    payload = {}
    weird_values_keys = [k for k in col_payload.keys() if str(k).strip("’'\"").upper() == "VALUES"]
    for k, v in col_payload.items():
        key_up = str(k).strip().upper().strip("’'\"")
        if key_up == "VALUES" or k in weird_values_keys:
            payload["VALUES"] = v
        else:
            payload[key_up] = v
base = DEFAULT_COLUMN.copy()
base.update(payload)
# Coerce heterogeneous fields to stable types
checks = _strify_list(base.get("CHECKS")) or []
values = _strify_list(base.get("VALUES")) # None or list[str]
default_val = _strify_scalar(base.get("DEFAULT"))
ctype = _strify_scalar(base.get("TYPE"))
normalized = {
"NAME": col_name,
"TYPE": ctype,
"NULLABLE": bool(base.get("NULLABLE", True)),
"UNIQUE": bool(base.get("UNIQUE", False)),
"DEFAULT": default_val,
"CHECKS": checks,
"IS_PRIMARY": bool(base.get("IS_PRIMARY", False)),
"IS_INDEX": bool(base.get("IS_INDEX", False)),
"VALUES": values,
}
return normalized
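# Illustrative (hypothetical payload):
#   normalize_column("status", {"type": "text", "’VALUES’": ["a", "b"]})
#   -> {"NAME": "status", "TYPE": "text", "NULLABLE": True, ..., "VALUES": ["a", "b"]}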
def normalize_table(table_name: str, table_payload: Dict[str, Any]) -> Dict[str, Any]:
tp = {(k.strip().upper() if isinstance(k, str) else k): v
for k, v in (table_payload or {}).items()}
columns_obj = tp.get("COLUMNS", {}) or {}
columns_list: List[Dict[str, Any]] = []
if isinstance(columns_obj, dict):
for col_name, col_payload in columns_obj.items():
columns_list.append(normalize_column(str(col_name), col_payload or {}))
elif isinstance(columns_obj, list):
for c in columns_obj:
if isinstance(c, dict):
col_name = (
c.get("NAME") or c.get("name") or
c.get("COLUMN_NAME") or c.get("column_name") or "unknown"
)
columns_list.append(normalize_column(str(col_name), c or {}))
base = DEFAULT_TABLE.copy()
base["PRIMARY_KEYS"] = list(base.get("PRIMARY_KEYS", [])) + list(tp.get("PRIMARY_KEYS", []) or [])
base["FOREIGN_KEYS"] = list(tp.get("FOREIGN_KEYS", []) or [])
base["CHECKS"] = list(tp.get("CHECKS", []) or [])
base["INDEXES"] = list(tp.get("INDEXES", []) or [])
# Normalize FKs
norm_fks = []
for fk in base["FOREIGN_KEYS"]:
if not isinstance(fk, dict):
continue
fk_up = {(k.strip().upper() if isinstance(k, str) else k): v for k, v in fk.items()}
norm_fks.append({
"COLUMNS": _strify_list(fk_up.get("COLUMNS")) or [],
"FOREIGN_TABLE": _strify_scalar(fk_up.get("FOREIGN_TABLE")),
"REFERRED_COLUMNS": _strify_list(fk_up.get("REFERRED_COLUMNS")) or [],
"ON_DELETE": _strify_scalar(fk_up.get("ON_DELETE")),
"ON_UPDATE": _strify_scalar(fk_up.get("ON_UPDATE")),
})
    # Collapse primary keys and indexes into a flat list[str] (composites comma-joined)
norm_pks = _normalize_index_like(base["PRIMARY_KEYS"])
norm_indexes = _normalize_index_like(base["INDEXES"])
return {
"TABLE_NAME": table_name,
"COLUMNS": columns_list,
"PRIMARY_KEYS": norm_pks,
"FOREIGN_KEYS": norm_fks,
"CHECKS": _strify_list(base["CHECKS"]) or [],
"INDEXES": norm_indexes,
}
def normalize_record(key: str, payload: Dict[str, Any]) -> Dict[str, Any]:
id_part, filename = split_id_filename(key)
# Merge INFO with synthesized fields
info_in = payload.get("INFO", {}) or {}
info_norm = dict(info_in) # shallow copy
if id_part:
info_norm.setdefault("ID", id_part)
info_norm.setdefault("FILENAME", filename)
# Normalize TABLES -> list of tables
tables_obj = payload.get("TABLES", {}) or {}
tables_list = []
if isinstance(tables_obj, dict):
for tname, tpayload in tables_obj.items():
tables_list.append(normalize_table(tname, tpayload or {}))
elif isinstance(tables_obj, list):
# Already a list; ensure each is normalized and has TABLE_NAME
for t in tables_obj:
if isinstance(t, dict):
tname = t.get("TABLE_NAME") or t.get("name") or "unknown"
tables_list.append(normalize_table(tname, t))
normalized = {
"ID": info_norm.get("ID", None),
"FILENAME": info_norm.get("FILENAME", None),
"URL": info_in.get("URL", None),
"LICENSE": info_in["LICENSE"] if "LICENSE" in info_in else "UNKNOWN",
"PERMISSIVE": bool(info_in.get("PERMISSIVE", False)),
"TABLES": tables_list,
}
return normalized
def transform(data: Dict[str, Any]) -> List[Dict[str, Any]]:
if not isinstance(data, dict):
raise ValueError("Expected top-level object to be a dict mapping keys like '002125_filename' to records.")
out: List[Dict[str, Any]] = []
for key, payload in data.items():
out.append(normalize_record(str(key), payload or {}))
return out
# -------- Main --------
def main():
    # Read the gzipped SchemaPile dump and flatten it into one record per schema
    with gzip.open("schemapile-perm.json.gz", "rt", encoding="utf-8") as f:
        data = json.load(f)
    records = transform(data)
    # Emit as JSON Lines, one record per line
    with open("data.jsonl", "w", encoding="utf-8") as fh:
        for rec in records:
            fh.write(json.dumps(rec, ensure_ascii=False) + "\n")
    # Load the JSONL back as a Hugging Face Dataset to validate the output
    dataset = Dataset.from_json("data.jsonl")
    print(f"Loaded {len(dataset)} records")
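    # Publishing to the Hub would be the natural next step; a minimal sketch,
    # assuming a target repo id (the id below is a placeholder, not the real repo):
    #
    #   dataset.push_to_hub("your-org/schemapile-perm")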
if __name__ == "__main__":
main()