#!/usr/bin/env python3
|
|
"""
|
|
Sync Flux function help in ui/src/flux/constants/functions.ts with local docs.
|
|
|
|
Usage:
|
|
python scripts/flux-help-sync.py
|
|
python scripts/flux-help-sync.py --tag-undocumented
|
|
python scripts/flux-help-sync.py --prune-undocumented
|
|
python scripts/flux-help-sync.py --prune-removed
|
|
python scripts/flux-help-sync.py --docs-v2-repo-dir ../docs-v2
|
|
python scripts/flux-help-sync.py --output /path/to/flux-help.diff
|
|
|
|
Output:
|
|
Writes a unified diff to flux-help.diff in the repo root by default.
|
|
|
|
Generated by Codex CLI agent, model: gpt-5.2-codex high.
|
|
"""
|
|
import argparse
|
|
import json
|
|
import re
|
|
import subprocess
|
|
from difflib import unified_diff
|
|
from pathlib import Path
|
|
|
|
|
|
# Wrap width (in characters) for generated TS string values; consumed by
# format_kv/format_entry below.
WRAP_LEN = 68
# Synthetic "variants" for functions whose docs describe several mutually
# exclusive calling conventions. Keyed by (function name, docs package);
# each variant supplies the UI display suffix, the parameter subset it
# accepts, and a ready-made example call.
VARIANT_SPECS = {
    ("csv.from", "csv"): [
        {
            "suffix": "file",
            "params": ["file"],
            "example": 'csv.from(file: path)',
        },
        {
            "suffix": "csvData",
            "params": ["csv"],
            "example": "csv.from(csv: csvData)",
        },
    ],
    ("csv.from", "experimental/csv"): [
        {
            "suffix": "url",
            "params": ["url"],
            "example": 'csv.from(url: "http://example.com/data.csv")',
        },
    ],
    ("v1.json", "influxdata/influxdb/v1"): [
        {
            "suffix": "file",
            "params": ["file"],
            "example": "v1.json(file: path)",
        },
        {
            "suffix": "jsonData",
            "params": ["json"],
            "example": "v1.json(json: jsonData)",
        },
    ],
}
|
|
|
|
|
|
def parse_front_matter(text):
    """Parse a minimal subset of YAML front matter from a markdown page.

    Returns ``(front_matter_dict, body)``. Supports flat ``key: value``
    pairs and folded/literal block scalars (``key: >`` / ``key: |``),
    whose indented continuation lines are joined with single spaces.
    Nested mappings and sequences are not handled. When *text* does not
    start with a ``---`` fence, returns ``({}, text)`` unchanged.
    """
    if not text.startswith("---"):
        return {}, text
    parts = text.split("---", 2)
    if len(parts) < 3:
        return {}, text
    fm_text = parts[1]
    body = parts[2]
    fm = {}
    lines = fm_text.splitlines()
    i = 0
    while i < len(lines):
        line = lines[i]
        # Skip blank lines and YAML comments.
        if not line.strip() or line.strip().startswith("#"):
            i += 1
            continue
        if ":" in line:
            key, value = line.split(":", 1)
            key = key.strip()
            value = value.strip()
            if value in (">", "|"):
                # Block scalar: collect the following indented lines.
                i += 1
                block = []
                while i < len(lines):
                    l = lines[i]
                    if l.startswith(" "):
                        block.append(l.strip())
                        i += 1
                    else:
                        break
                fm[key] = " ".join(block).strip()
                continue
            # Plain scalar; surrounding double quotes are dropped.
            fm[key] = value.strip().strip('"')
        i += 1
    return fm, body
|
|
|
|
|
|
def parse_tags(text):
    """Return the ``flux/v0/tags`` front-matter list as unquoted strings."""
    match = re.search(r"^flux/v0/tags:\s*\[([^\]]*)\]", text, flags=re.M)
    if match is None:
        return []
    items = [piece.strip() for piece in match.group(1).split(",")]
    return [item.strip("\"'") for item in items if item]
|
|
|
|
|
|
def parse_description(fm):
    """Return the front-matter ``description`` field, defaulting to ''."""
    description = fm.get("description", "")
    return description
|
|
|
|
|
|
def parse_first_paragraph(body):
    """Return the first prose paragraph of a markdown body as one line.

    Skips fenced code blocks, headings, Hugo shortcodes, list items,
    tables, blockquotes, and bare HTML tags. Collection stops at the
    first skippable/blank line once paragraph text has been gathered.
    """
    # Drop autogenerated HTML comment blocks before scanning.
    body = re.sub(r"<!-+.*?-+>", "", body, flags=re.S)
    collected = []
    inside_fence = False
    for raw_line in body.splitlines():
        stripped = raw_line.strip()
        if stripped.startswith("```"):
            inside_fence = not inside_fence
            continue
        if inside_fence:
            continue
        skippable = (
            not stripped
            or stripped.startswith(("#", "{{", "- ", "* ", "|", ">"))
            or (stripped.startswith("<") and stripped.endswith(">"))
        )
        if skippable:
            if collected:
                break
            continue
        collected.append(stripped)
    return " ".join(collected).strip()
|
|
|
|
|
|
def is_prelude(body):
    """True when the doc page says the function needs no package import."""
    pattern = r"does not require a package import|Flux prelude"
    return re.search(pattern, body, flags=re.I) is not None
|
|
|
|
|
|
def is_deprecated(fm, body):
    """Report whether a doc page marks its function as deprecated.

    The ``deprecated`` front-matter field wins (any non-blank string or
    other truthy-ish value counts); otherwise the body is scanned for
    the standalone word "deprecated".
    """
    flag = fm.get("deprecated")
    if isinstance(flag, str) and flag.strip():
        return True
    if flag not in (None, "", False):
        return True
    return re.search(r"\bdeprecated\b", body, flags=re.I) is not None
|
|
|
|
|
|
def _first_description_line(segment):
    """Return the first non-empty, non-shortcode line of a param segment."""
    for line in segment.splitlines():
        line = line.strip()
        if not line or line.startswith("({{< "):
            continue
        return line
    return ""


def parse_parameters(body):
    """Parse the '## Parameters' section of a doc page.

    Returns ``(ordered_param_names, {name: first_description_line})``.
    Hugo shortcode markers such as ``({{< req >}})`` are skipped when
    picking the description line. Both results are empty when the page
    has no Parameters heading.
    """
    names = []
    descriptions = {}
    header = re.search(r"^## Parameters\s*$", body, flags=re.M)
    if header is None:
        return names, descriptions
    tail = body[header.end() :]
    # Limit the section to everything before the next H2 heading.
    next_h2 = re.search(r"^##\s+\S", tail, flags=re.M)
    section = tail if next_h2 is None else tail[: next_h2.start()]
    headings = list(re.finditer(r"^###\s+([^\n]+)$", section, flags=re.M))
    for pos, heading in enumerate(headings):
        name = heading.group(1).strip()
        names.append(name)
        seg_start = heading.end()
        seg_end = headings[pos + 1].start() if pos + 1 < len(headings) else len(section)
        descriptions[name] = _first_description_line(section[seg_start:seg_end])
    return names, descriptions
|
|
|
|
|
|
def parse_signature(body):
    """Extract the Flux type signature and a {param: type-string} map.

    Looks for the "Function type signature" ```js block, then splits the
    first top-level parenthesized parameter list on commas while
    tracking nesting depth of ``()``, ``[]``, and ``{}``. Pipe-receive
    (``<-``) and optional (``?``) markers are stripped from parameter
    names. Returns ``("", {})`` when no signature block exists, and
    ``(sig, {})`` when the signature has no parseable parameter list.
    """
    m = re.search(r"Function type signature\s*\n\n```js\n(.*?)```", body, flags=re.S)
    if not m:
        return "", {}
    sig = m.group(1).strip()
    start = sig.find("(")
    if start == -1:
        return sig, {}
    # Phase 1: find the matching close paren of the first group.
    depth = 0
    start_idx = None
    end_idx = None
    for i, ch in enumerate(sig[start:], start):
        if ch == "(":
            if depth == 0:
                start_idx = i + 1
            depth += 1
        elif ch == ")":
            depth -= 1
            if depth == 0:
                end_idx = i
                break
    if start_idx is None or end_idx is None:
        return sig, {}
    param_str = sig[start_idx:end_idx]
    # Phase 2: split on commas that occur at zero nesting depth only.
    params = []
    buf = ""
    paren = brack = brace = 0
    for ch in param_str:
        if ch == "(":
            paren += 1
        elif ch == ")":
            paren -= 1
        elif ch == "[":
            brack += 1
        elif ch == "]":
            brack -= 1
        elif ch == "{":
            brace += 1
        elif ch == "}":
            brace -= 1
        if ch == "," and paren == 0 and brack == 0 and brace == 0:
            params.append(buf)
            buf = ""
        else:
            buf += ch
    if buf.strip():
        params.append(buf)
    # Phase 3: turn each "name: type" entry into a mapping.
    param_types = {}
    for entry in params:
        entry = entry.strip()
        if not entry:
            continue
        if entry.startswith("<-"):
            entry = entry[2:].strip()  # pipe-receive marker
        if entry.startswith("?"):
            entry = entry[1:].strip()  # optional-parameter marker
        if ":" not in entry:
            continue
        name, type_str = entry.split(":", 1)
        name = name.strip()
        type_str = type_str.strip()
        param_types[name] = type_str
    return sig, param_types
|
|
|
|
|
|
def map_type(type_str):
    """Map a Flux type-signature fragment to a UI-facing type label.

    Arrow syntax means a function; otherwise case-insensitive prefix
    rules are tried in order, then substring hints (useful for
    type-class constraints such as ``A where A: Timeable``), and
    finally the catch-all "Object".
    """
    raw = type_str.strip()
    lowered = raw.lower()
    if "->" in raw or "=>" in raw:
        return "Function"
    # Prefix rules, in priority order (mirrors the original if-chain).
    prefix_rules = (
        ("stream[", "Stream of tables"),
        ("{", "Object"),
        ("[", "Array"),
        ("array[", "Array"),
        ("array", "Array"),
        ("duration", "Duration"),
        ("time", "Time"),
        ("int", "Integer"),
        ("uint", "UInteger"),
        ("float", "Float"),
        ("string", "String"),
        ("bool", "Boolean"),
        ("bytes", "Bytes"),
        ("regexp", "Regexp"),
    )
    for prefix, label in prefix_rules:
        if lowered.startswith(prefix):
            return label
    # Substring fallbacks, also in priority order.
    substring_rules = (
        ("time", "Time"),
        ("duration", "Duration"),
        ("string", "String"),
        ("bool", "Boolean"),
        ("int", "Integer"),
        ("float", "Float"),
    )
    for needle, label in substring_rules:
        if needle in lowered:
            return label
    return "Object"
|
|
|
|
|
|
def strip_line_comment(line):
    """Strip a trailing ``//`` comment from one line of Flux code.

    Quote-aware: ``//`` inside a single- or double-quoted string literal
    is left alone, and backslash escapes within strings are honored.
    The result is right-stripped either way.
    """
    in_string = None  # active quote character while inside a string
    i = 0
    # Stop at len-1: a comment marker needs two characters ("//").
    while i < len(line) - 1:
        ch = line[i]
        if in_string:
            if ch == in_string:
                in_string = None
            elif ch == "\\":
                i += 1  # skip the escaped character
            i += 1
            continue
        if ch in ("'", '"'):
            in_string = ch
            i += 1
            continue
        if ch == "/" and line[i + 1] == "/":
            # Comment starts here; drop it plus trailing whitespace.
            return line[:i].rstrip()
        i += 1
    return line.rstrip()
|
|
|
|
|
|
def in_string_at(text, idx):
    """Return True when position *idx* in *text* falls inside a string.

    Scans from the start of *text*, tracking single- and double-quoted
    string literals and honoring backslash escapes inside them.
    """
    in_string = None  # active quote character while inside a string
    i = 0
    while i < idx:
        ch = text[i]
        if in_string:
            if ch == in_string:
                in_string = None
            elif ch == "\\":
                i += 1  # skip the escaped character
            i += 1
            continue
        if ch in ("'", '"'):
            in_string = ch
        i += 1
    return in_string is not None
|
|
|
|
|
|
def extract_function_call(text, name):
    """Return the first top-level call to *name* in *text*, or None.

    Skips occurrences inside string literals and occurrences that are a
    suffix of a longer identifier or a member access (e.g. ``x.name(``
    when searching for ``name(``). The returned slice runs from *name*
    through its matching closing parenthesis, honoring nested parens
    and string literals (with backslash escapes) along the way.
    """
    target = name + "("
    idx = 0
    while True:
        idx = text.find(target, idx)
        if idx == -1:
            return None
        if in_string_at(text, idx):
            idx += len(target)
            continue
        if idx > 0:
            prev = text[idx - 1]
            # Reject partial-identifier and member-access matches.
            if prev.isalnum() or prev == "_" or prev == ".":
                idx += len(target)
                continue
        j = idx + len(name)
        depth = 0
        in_string = None
        # Walk forward balancing parentheses. A `while` loop is required
        # here: the previous `for j in range(...)` loop did `j += 1` on a
        # backslash, which has no effect on a for-loop variable, so an
        # escaped quote (\") inside a string ended the string early and
        # truncated the extracted call.
        while j < len(text):
            ch = text[j]
            if in_string:
                if ch == in_string:
                    in_string = None
                elif ch == "\\":
                    j += 1  # skip the escaped character
                j += 1
                continue
            if ch in ("'", '"'):
                in_string = ch
                j += 1
                continue
            if ch == "(":
                depth += 1
            elif ch == ")":
                depth -= 1
                if depth == 0:
                    return text[idx : j + 1]
            j += 1
        # No balanced close for this occurrence; try the next one.
        idx += len(target)
|
|
|
|
|
|
def squash_ws(text):
    """Collapse whitespace runs outside string literals to single spaces.

    Whitespace inside quoted strings is preserved verbatim (backslash
    escapes honored so an escaped quote does not end the string); the
    result is stripped of leading/trailing whitespace.
    """
    out = []
    in_string = None  # active quote character, if any
    escape = False  # previous in-string character was a backslash
    prev_space = False  # last emitted character was the collapsed space
    for ch in text:
        if in_string:
            out.append(ch)
            if escape:
                escape = False
            elif ch == "\\":
                escape = True
            elif ch == in_string:
                in_string = None
            continue
        if ch in ("'", '"'):
            in_string = ch
            out.append(ch)
            prev_space = False
            continue
        if ch.isspace():
            if not prev_space:
                out.append(" ")
            prev_space = True
            continue
        out.append(ch)
        prev_space = False
    return "".join(out).strip()
|
|
|
|
|
|
def remove_trailing_commas(text):
    """Delete commas that directly precede ``)`` outside string literals.

    Used to tidy single-line example calls after whitespace squashing,
    e.g. ``f(a, b, )`` becomes ``f(a, b)``. Intervening whitespace
    between the comma and the close paren is dropped too.
    """
    out = []
    in_string = None  # active quote character, if any
    escape = False  # previous in-string character was a backslash
    i = 0
    while i < len(text):
        ch = text[i]
        if in_string:
            out.append(ch)
            if escape:
                escape = False
            elif ch == "\\":
                escape = True
            elif ch == in_string:
                in_string = None
            i += 1
            continue
        if ch in ("'", '"'):
            in_string = ch
            out.append(ch)
            i += 1
            continue
        if ch == ",":
            # Look ahead past whitespace: drop the comma if ")" follows.
            j = i + 1
            while j < len(text) and text[j].isspace():
                j += 1
            if j < len(text) and text[j] == ")":
                i = j
                continue
        out.append(ch)
        i += 1
    return "".join(out)
|
|
|
|
|
|
def parse_example(body, name):
    """Extract a one-line example call to *name* from '## Examples'.

    Scans each ```js code block after the Examples heading, strips
    ``//`` comments, and returns the first call to *name* with
    whitespace squashed and trailing commas removed. Returns None when
    no Examples section or no matching call exists.
    """
    m = re.search(r"^## Examples\s*$", body, flags=re.M)
    if not m:
        return None
    sub = body[m.end() :]
    for cm in re.finditer(r"```js\n(.*?)```", sub, flags=re.S):
        block = cm.group(1)

        lines = []
        for ln in block.splitlines():
            ln_clean = strip_line_comment(ln)
            # Drop lines that were pure comments (empty after stripping).
            if ln_clean.strip() == "" and ln.lstrip().startswith("//"):
                continue
            lines.append(ln_clean)

        code = "\n".join(lines)

        call = extract_function_call(code, name)
        if call:
            return remove_trailing_commas(squash_ws(call))
    return None
|
|
|
|
|
|
def normalize_desc(text):
    """Collapse whitespace and flatten markdown links to ``text (url)``."""
    collapsed = " ".join(text.split())
    # Plain-text tooltips cannot render markdown links, so expose the URL.
    return re.sub(r"\[([^\]]+)\]\(([^)]+)\)", r"\1 (\2)", collapsed)
|
|
|
|
|
|
def normalize_doc_desc(desc, name):
    """Normalize a doc description and drop a leading ``name()`` prefix.

    Docs often open with "`name()` does ..."; the prefix is removed and
    the first letter re-capitalized so the sentence stands alone.
    """
    cleaned = normalize_desc(desc)
    if not cleaned:
        return cleaned
    prefix = r"^`?%s\(\)`?\s+" % re.escape(name)
    cleaned = re.sub(prefix, "", cleaned)
    if cleaned and cleaned[0].islower():
        cleaned = cleaned[0].upper() + cleaned[1:]
    return cleaned
|
|
|
|
|
|
def parse_removed_functions(release_notes_path):
    """Collect function names listed as removed in the release notes.

    Matches bullet lines of the form "- Removed `foo()` ..." and returns
    every ``name()`` identifier found on such lines. Returns an empty
    set when the release-notes file does not exist.
    """
    if not release_notes_path.exists():
        return set()
    bullet_re = re.compile(
        r"^\s*[-*]\s*Remove(?:d)?(?:\s+the)?\s+\[?`?[A-Za-z0-9_.]+\(\)`?",
        flags=re.I,
    )
    name_re = re.compile(r"`?([A-Za-z0-9_.]+)\(\)`?")
    removed = set()
    for line in release_notes_path.read_text().splitlines():
        if bullet_re.match(line):
            removed.update(name_re.findall(line))
    return removed
|
|
|
|
|
|
def build_variant_docs(doc_map):
    """Synthesize help entries for functions with multiple calling
    conventions (see VARIANT_SPECS).

    Each variant reuses the base doc's description, parameter docs,
    types, category, and link, but gets its own display name
    ("base (suffix)"), parameter subset, and example call. Returns a
    dict keyed by (display name, base package).
    """
    variant_docs = {}
    for (base_name, base_package), variants in VARIANT_SPECS.items():
        base_doc = doc_map.get((base_name, base_package))
        if not base_doc:
            # Base page missing from the docs checkout; skip its variants.
            continue
        for variant in variants:
            name = f"{base_name} ({variant['suffix']})"
            # Consumers derive arg lists from "params"/"param_desc"/
            # "type_map", so no per-variant arg list is materialized here.
            # (A previous revision built one and silently discarded it.)
            variant_docs[(name, base_package)] = {
                "name": name,
                "package": base_doc["package"],
                "desc": base_doc["desc"],
                "params": variant["params"],
                "param_desc": base_doc["param_desc"],
                "tables_piped": base_doc["tables_piped"],
                "type_map": base_doc["type_map"],
                "example": variant["example"],
                "category": base_doc["category"],
                "link": base_doc["link"],
                "deprecated": base_doc.get("deprecated", False),
            }
    return variant_docs
|
|
|
|
|
|
def choose_preferred_packages(doc_map):
    """Decide which package(s) to keep for names documented in several.

    Rules, in order: prefer non-deprecated docs; for ``from``, prefer
    non-contrib packages; keep all candidates when their (normalized)
    descriptions differ, since they are genuinely different functions;
    otherwise keep the single best-scoring package. Returns
    ``{name: set of allowed package strings}``.
    """
    by_name = {}
    for (name, _pkg), doc in doc_map.items():
        by_name.setdefault(name, []).append(doc)
    allowed = {}
    for name, docs in by_name.items():
        non_deprecated = [d for d in docs if not d.get("deprecated")]
        candidates = non_deprecated or docs
        if len(candidates) <= 1:
            allowed[name] = {candidates[0]["package"]} if candidates else set()
            continue
        non_contrib = [
            d for d in candidates if not d.get("package", "").startswith("contrib/")
        ]
        if name == "from" and non_contrib:
            if len(non_contrib) == 1:
                allowed[name] = {non_contrib[0]["package"]}
                continue
            candidates = non_contrib
        # Distinct descriptions imply distinct functions: keep them all.
        descs = {normalize_desc(d.get("desc", "")) for d in candidates}
        if len(descs) > 1:
            allowed[name] = {d["package"] for d in candidates}
            continue

        def score(doc):
            # Lower tuples sort first: non-deprecated, non-experimental,
            # non-contrib, prelude ("" package), then alphabetical.
            pkg = doc.get("package", "")
            return (
                doc.get("deprecated", False),
                pkg.startswith("experimental/"),
                pkg.startswith("contrib/"),
                pkg != "",
                pkg,
            )

        best = min(candidates, key=score)
        allowed[name] = {best["package"]}
    return allowed
|
|
|
|
|
|
def derive_category(tags, package):
    """Choose a toolbar category for a function.

    The first recognized docs tag wins; otherwise a handful of package
    names are mapped directly; notification-service packages map to
    "Notification endpoints"; everything else defaults to
    "Transformations".
    """
    tag_categories = {
        "inputs": "Inputs",
        "outputs": "Outputs",
        "transformations": "Transformations",
        "aggregates": "Aggregates",
        "selectors": "Selectors",
        "dynamic queries": "Dynamic queries",
        "dynamic-queries": "Dynamic queries",
        "tests": "Tests",
        "type-conversions": "Type Conversions",
        "type conversions": "Type Conversions",
        "date/time": "Date/time",
        "metadata": "Metadata",
        "notification endpoints": "Notification endpoints",
        "notification-endpoints": "Notification endpoints",
        "geotemporal": "Geotemporal",
    }
    for tag in tags:
        category = tag_categories.get(tag.lower())
        if category is not None:
            return category
    package_categories = {
        "date": "Date/time",
        "timezone": "Date/time",
        "testing": "Tests",
        "types": "Type Conversions",
        "geo": "Geotemporal",
        "experimental/geo": "Geotemporal",
    }
    if package in package_categories:
        return package_categories[package]
    notification_services = (
        "slack",
        "pagerduty",
        "opsgenie",
        "victorops",
        "sensu",
        "teams",
        "telegram",
        "discord",
        "servicenow",
        "webexteams",
        "bigpanda",
        "alerta",
        "pushbullet",
    )
    if any(service in package for service in notification_services):
        return "Notification endpoints"
    return "Transformations"
|
|
|
|
|
|
def quote_js(s):
    """Render *s* as a single-line JS string literal.

    Single quotes are preferred; double quotes are used only when the
    text contains a single quote but no double quote. Real newlines
    survive as the two-character escape ``\\n``; None renders as ''.
    """
    text = "" if s is None else s
    # Protect real newlines so the backslash escaping below cannot
    # touch them; the sentinel itself contains no quotes/backslashes.
    sentinel = "__FLUX_HELP_NEWLINE__"
    text = text.replace("\n", sentinel)
    use_double = "'" in text and '"' not in text
    text = text.replace("\\", "\\\\")
    if use_double:
        text = text.replace('"', '\\"')
        quote = '"'
    else:
        text = text.replace("'", "\\'")
        quote = "'"
    text = text.replace(sentinel, "\\n")
    return quote + text + quote
|
|
|
|
|
|
def format_kv(
    key,
    value,
    indent,
    wrap=True,
    max_len=80,
    normalize_fn=normalize_desc,
):
    """Render ``key: 'value',`` as TS source lines at *indent*.

    The value is passed through *normalize_fn* first. With wrapping
    enabled, a value longer than *max_len* (or a line past 80 columns)
    is emitted as a key line followed by an indented value line.
    """
    pad = " " * indent
    value = normalize_fn(value)
    one_line = f"{pad}{key}: {quote_js(value)},"
    if not wrap:
        return [one_line]
    if len(value) <= max_len and len(one_line) <= 80:
        return [one_line]
    return [f"{pad}{key}:", f"{pad} {quote_js(value)},"]
|
|
|
|
|
|
def format_arg(arg, indent):
    """Render one argument object as TS source lines at *indent*.

    Emits ``{ name, desc, type }`` with the desc wrapped via format_kv;
    ``type`` defaults to "Object" when absent.
    """
    ind = " " * indent
    lines = [f"{ind}{{"]
    lines.append(f"{ind} name: {quote_js(arg['name'])},")
    lines += format_kv(
        "desc",
        arg.get("desc", ""),
        indent + 2,
        max_len=WRAP_LEN - 5,
    )
    lines.append(f"{ind} type: {quote_js(arg.get('type', 'Object'))},")
    lines.append(f"{ind}}},")
    return lines
|
|
|
|
|
|
def format_entry(entry, indent, trailing_comma=True):
    """Render one FluxToolbarFunction object as TS source lines.

    Field order matches the existing functions.ts layout: name, args,
    package, desc, example, category, link. *trailing_comma* controls
    the comma after the closing brace (off for standalone consts).
    """
    ind = " " * indent
    lines = [f"{ind}{{"]
    lines.append(f"{ind} name: {quote_js(entry['name'])},")
    args = entry.get("args", [])
    if not args:
        lines.append(f"{ind} args: [],")
    else:
        lines.append(f"{ind} args: [")
        for arg in args:
            lines += format_arg(arg, indent + 4)
        lines.append(f"{ind} ],")
    lines.append(f"{ind} package: {quote_js(entry.get('package', ''))},")
    lines += format_kv("desc", entry.get("desc", ""), indent + 2, max_len=WRAP_LEN)
    lines += format_kv(
        "example",
        entry.get("example", ""),
        indent + 2,
        max_len=WRAP_LEN,
    )
    lines.append(f"{ind} category: {quote_js(entry.get('category', ''))},")
    lines += format_kv("link", entry.get("link", ""), indent + 2, max_len=WRAP_LEN)
    lines.append(f"{ind}}}{',' if trailing_comma else ''}")
    return lines
|
|
|
|
|
|
def build_stub(entry):
    """Return a copy of *entry* whose desc is flagged as undocumented.

    An empty desc becomes "Undocumented function."; a non-empty desc
    gets " (Undocumented)" appended unless it already mentions
    "undocumented" (case-insensitive). *entry* itself is not mutated.
    """
    description = entry.get("desc", "").strip()
    if not description:
        description = "Undocumented function."
    elif "undocumented" not in description.lower():
        description = f"{description} (Undocumented)"
    return {**entry, "desc": description}
|
|
|
|
|
|
def main():
    """Rebuild functions.ts help entries from docs-v2 and emit a diff.

    Pipeline: parse CLI flags; evaluate the current functions.ts with
    Node to recover its constants; scan every stdlib doc page into a
    doc map; merge docs into the existing entries (pruning/stubbing per
    flags); regenerate the TS source; write a unified diff and verify
    it with `git apply --check`.
    """
    # --- CLI -------------------------------------------------------------
    parser = argparse.ArgumentParser(
        description="Sync Flux help entries from local docs."
    )
    parser.add_argument(
        "--tag-undocumented",
        action="store_true",
        help="Tag functions without documentation pages as undocumented stubs.",
    )
    prune_group = parser.add_mutually_exclusive_group()
    prune_group.add_argument(
        "--prune-undocumented",
        action="store_true",
        help="Remove entries missing doc pages.",
    )
    prune_group.add_argument(
        "--prune-removed",
        action="store_true",
        help="Remove entries missing doc pages only if listed as removed in release notes.",
    )
    parser.add_argument(
        "--docs-v2-repo-dir",
        default="../docs-v2",
        help="Path to docs-v2 repository.",
    )
    parser.add_argument(
        "--output",
        default="flux-help.diff",
        help="Path for unified diff output (default: flux-help.diff).",
    )
    args = parser.parse_args()

    # Paths are resolved relative to the repo root (parent of scripts/).
    repo_root = Path(__file__).resolve().parents[1]
    docs_root = (repo_root / Path(args.docs_v2_repo_dir).expanduser()).resolve()
    stdlib_root = docs_root / "content/flux/v0/stdlib"
    release_notes_path = docs_root / "content/flux/v0/release-notes.md"
    functions_path = repo_root / "ui/src/flux/constants/functions.ts"

    # --- Read the current TS constants via Node --------------------------
    # The TS file is close enough to plain JS that stripping imports and
    # rewriting `export const` lets Node's vm evaluate it; the result is
    # returned as JSON on stdout.
    node_code = r"""
const fs = require('fs');
const vm = require('vm');
let src = fs.readFileSync('ui/src/flux/constants/functions.ts','utf8');
src = src.replace(/^import[^\n]*\n/gm,'');
src = src.replace(/export const (\w+):[^\n=]+=/g,'var $1 =');
src = src.replace(/export const (\w+) =/g,'var $1 =');
const context = {};
vm.createContext(context);
vm.runInContext(src, context);
// find exported const names
const exportNames = [];
const exportRe = /^export const (\w+)/gm;
let m;
while ((m = exportRe.exec(fs.readFileSync('ui/src/flux/constants/functions.ts','utf8'))) !== null) {
exportNames.push(m[1]);
}
// constants are exportNames except FUNCTIONS
const constants = {};
for (const name of exportNames) {
if (name === 'FUNCTIONS') continue;
constants[name] = context[name];
}
const result = { constants, functions: context.FUNCTIONS };
console.log(JSON.stringify(result));
"""

    res = subprocess.run(
        ["node", "-e", node_code],
        cwd=repo_root,
        capture_output=True,
        text=True,
        check=True,
    )
    parsed = json.loads(res.stdout)
    constants = parsed["constants"]
    functions_list = parsed["functions"]

    # --- Scan the docs pages into a doc map ------------------------------
    doc_map = {}
    for path in stdlib_root.rglob("*.md"):
        if path.name in ("_index.md", "all-functions.md"):
            continue
        text = path.read_text()
        fm, body = parse_front_matter(text)
        title = fm.get("title", "")
        # Titles like "foo() function" are truncated at the "()".
        if "() " in title:
            title = title.split("()")[0] + "()"
        if "()" not in title:
            continue
        name = title.split("()")[0].strip()
        rel = path.relative_to(stdlib_root)
        # Pages under universe/ (and prelude pages) use an empty package.
        if rel.parts[0] == "universe":
            package = ""
        else:
            package = "/".join(rel.parts[:-1])
        effective_package = "" if is_prelude(body) else package
        first_para = parse_first_paragraph(body)
        desc_source = first_para or parse_description(fm)
        desc = normalize_doc_desc(desc_source, name)
        tags = parse_tags(text)
        params, param_desc = parse_parameters(body)
        sig, sig_types = parse_signature(body)
        tables_piped = "<-tables" in sig
        type_map = {k: map_type(v) for k, v in sig_types.items()}
        example = parse_example(body, name)
        if not example:
            example = ""
        link = "https://docs.influxdata.com" + str(
            "/flux/v0/stdlib/" + "/".join(rel.parts)
        ).replace(".md", "/")
        doc_map[(name, effective_package)] = {
            "name": name,
            "package": effective_package,
            "desc": desc,
            "params": params,
            "param_desc": param_desc,
            "tables_piped": tables_piped,
            "type_map": type_map,
            "example": example,
            "category": derive_category(tags, package),
            "link": link,
            "deprecated": is_deprecated(fm, body),
        }

    # --- Variants, preferred packages, dedupe -----------------------------
    variant_docs = build_variant_docs(doc_map)
    doc_map.update(variant_docs)
    variant_bases = set(VARIANT_SPECS.keys())
    allowed_packages = choose_preferred_packages(doc_map)
    # doc_map_full keeps every page for "missing docs" reporting below;
    # doc_map itself is filtered down to the preferred packages.
    doc_map_full = dict(doc_map)
    doc_map = {
        key: doc
        for key, doc in doc_map.items()
        if key[0] not in allowed_packages
        or key[1] in allowed_packages[key[0]]
    }

    # First occurrence wins when the TS list has duplicates.
    existing_map = {}
    for f in functions_list:
        key = (f.get("name"), f.get("package"))
        if key not in existing_map:
            existing_map[key] = f

    # --- Decide which existing entries to drop ----------------------------
    prune_keys = set()
    if args.prune_undocumented or args.prune_removed:
        missing_keys = set(existing_map.keys()) - set(doc_map_full.keys())
        if args.prune_undocumented:
            prune_keys = missing_keys
        else:
            removed_names = parse_removed_functions(release_notes_path)
            prune_keys = {key for key in missing_keys if key[0] in removed_names}
    # Entries in non-preferred packages are always pruned as duplicates.
    dedupe_keys = {
        key
        for key in existing_map.keys()
        if key[0] in allowed_packages and key[1] not in allowed_packages[key[0]]
    }
    prune_keys |= dedupe_keys

    # --- Merge docs into the existing entries ------------------------------
    updated_map = {}
    for key, f in existing_map.items():
        if key in prune_keys:
            continue
        doc = doc_map.get(key)
        if not doc:
            # No doc page: keep as-is, or stub it when tagging is on.
            updated_map[key] = build_stub(f) if args.tag_undocumented else f
            continue
        new_desc = doc["desc"] or f.get("desc", "")
        arg_entries = []
        existing_args = {a["name"]: a for a in f.get("args", [])}
        for param in doc["params"]:
            # The piped-in tables stream is not a toolbar argument.
            if param == "tables" and doc["tables_piped"]:
                continue
            existing = existing_args.get(param)
            # Prefer the doc description; fall back to the existing one.
            desc = doc["param_desc"].get(param) or (
                existing.get("desc") if existing else ""
            )
            # Prefer the existing (hand-curated) type over the derived one.
            arg_type = (
                (existing.get("type") if existing else "")
                or doc["type_map"].get(param)
                or "Object"
            )
            arg_entries.append(
                {"name": param, "desc": normalize_desc(desc), "type": arg_type}
            )
        updated = {
            "name": f.get("name"),
            "args": arg_entries,
            "package": f.get("package", ""),
            "desc": new_desc,
            "example": doc.get("example") or f.get("example", ""),
            "category": f.get("category", doc["category"]),
            "link": doc["link"],
        }
        updated_map[key] = updated

    # --- Build brand-new entries for documented-but-missing functions -----
    missing_entries = []
    for key, doc in doc_map.items():
        if key in updated_map:
            continue
        # Variant bases are represented by their variants, not themselves.
        if key in variant_bases:
            continue
        arg_entries = []
        for param in doc["params"]:
            if param == "tables" and doc["tables_piped"]:
                continue
            desc = normalize_desc(doc["param_desc"].get(param, ""))
            arg_type = doc["type_map"].get(param) or "Object"
            arg_entries.append({"name": param, "desc": desc, "type": arg_type})
        entry = {
            "name": doc["name"],
            "args": arg_entries,
            "package": doc["package"],
            "desc": doc["desc"],
            "example": doc["example"] or f"{doc['name']}()",
            "category": doc["category"],
            "link": doc["link"],
        }
        missing_entries.append(entry)

    missing_entries.sort(key=lambda e: e["name"])

    # --- Regenerate the TS file text ---------------------------------------
    # Preserve the order of standalone consts that precede FUNCTIONS.
    const_names = []
    for line in functions_path.read_text().splitlines():
        m = re.match(r"^export const (\w+)", line)
        if m:
            name = m.group(1)
            if name != "FUNCTIONS":
                const_names.append(name)
            else:
                break

    lines = []
    lines.append("import {FluxToolbarFunction} from 'src/types/flux'")
    lines.append("")

    for const_name in const_names:
        entry = constants[const_name]
        key = (entry.get("name"), entry.get("package"))
        if key in prune_keys:
            continue
        updated = updated_map.get(key, entry)
        lines.append(f"export const {const_name}: FluxToolbarFunction = {{")
        # Keep original 2-space indentation inside exported const blocks.
        lines += format_entry(updated, 0, trailing_comma=False)[1:-1]
        lines.append("}")
        lines.append("")

    lines.append("export const FUNCTIONS: FluxToolbarFunction[] = [")

    for f in functions_list:
        key = (f.get("name"), f.get("package"))
        if key in prune_keys:
            continue
        updated = updated_map.get(key, f)
        lines += format_entry(updated, 2, trailing_comma=True)

    for entry in missing_entries:
        lines += format_entry(entry, 2, trailing_comma=True)

    lines.append("]")
    lines.append("")

    new_text = "\n".join(lines)
    orig_text = functions_path.read_text()

    # --- Diff, report, write, verify ---------------------------------------
    diff = "\n".join(
        unified_diff(
            orig_text.splitlines(),
            new_text.splitlines(),
            fromfile="a/ui/src/flux/constants/functions.ts",
            tofile="b/ui/src/flux/constants/functions.ts",
            lineterm="",
        )
    )
    if diff and not diff.endswith("\n"):
        diff += "\n"

    missing_keys = set(existing_map.keys()) - set(doc_map_full.keys())
    if missing_keys:
        missing_list = sorted(
            (
                f"{name} (package: '{pkg}')"
                if pkg
                else name
                for name, pkg in missing_keys
            ),
            key=lambda s: s.lower(),
        )
        print(
            "Warning: the following help entries (functions) are missing "
            f"documentation in {docs_root}:"
        )
        for item in missing_list:
            print(f" {item}")
        if args.prune_undocumented:
            print("Note: these entries will be removed due to --prune-undocumented.")
        elif args.prune_removed:
            print("Note: these entries will be removed due to --prune-removed.")
        else:
            print("Note: these entries will remain as-is (no prune flag used).")

    out_path = Path(args.output)
    if not out_path.is_absolute():
        out_path = repo_root / out_path
    out_path.write_text(diff)
    try:
        out_display = out_path.relative_to(repo_root)
    except ValueError:
        out_display = out_path
    # Validate the patch with git when available; best-effort only.
    try:
        check = subprocess.run(
            ["git", "apply", "--check", str(out_path)],
            cwd=repo_root,
            capture_output=True,
            text=True,
        )
        if check.returncode == 0:
            print(f"Generated patch: {out_display} ({out_path})")
            print("Patch check: ok")
            print(f"Apply with: git apply {out_display}")
        else:
            msg = check.stderr.strip() or check.stdout.strip()
            print(f"Generated patch: {out_display} ({out_path})")
            print(f"Patch check failed: {msg}")
    except FileNotFoundError:
        print(f"Generated patch: {out_display} ({out_path})")
        print("Patch check skipped: git not found")
|
|
|
|
|
|
# Run the sync when invoked as a script.
if __name__ == "__main__":
    main()
|