import dispatch
+def register_device_commands(subparsers):
+ """
+ Register device-related commands:
+
+ device scan [--base-dir DIR]
+
+ For v1, we only support scanning already-mounted devices under /mnt.
+ """
+ ap = subparsers.add_parser("device")
+ ap.add_argument(
+ "action",
+ choices =["scan"],
+ )
+ ap.add_argument(
+ "--base-dir",
+ default ="/mnt",
+ help ="root under which to scan for <mapname>/user_data (default: /mnt)",
+ )
+
+
def register_db_commands(subparsers):
"""Register DB-related commands under 'db'.
"""Register subu related commands under 'subu':
subu make <masu> <subu> [<subu>]*
+ subu capture <masu> <subu> [<subu>]*
subu remove <Subu_ID> | <masu> <subu> [<subu>]*
subu list
subu info <Subu_ID> | <masu> <subu> [<subu>]*
+ subu option set|clear incommon <Subu_ID> | <masu> <subu> [<subu>]*
"""
ap_subu = subparsers.add_parser("subu")
subu_sub = ap_subu.add_subparsers(dest="subu_verb")
ap = subu_sub.add_parser("make")
ap.add_argument("path", nargs="+")
+ # capture: path[0] is masu, remaining elements are the subu chain
+ ap = subu_sub.add_parser("capture")
+ ap.add_argument("path", nargs="+")
+
# remove: either ID or path
ap = subu_sub.add_parser("remove")
ap.add_argument("target")
ap.add_argument("target")
ap.add_argument("rest", nargs="*")
+ # option incommon
+ ap = subu_sub.add_parser("option")
+ ap.add_argument("opt_action", choices=["set", "clear"])
+ ap.add_argument("opt_name", choices=["incommon"])
+ ap.add_argument("target")
+ ap.add_argument("rest", nargs="*")
+
def register_wireguard_commands(subparsers):
- """Register WireGuard related commands, grouped under 'WG':
-
- WG global <BaseCIDR>
- WG make <host:port>
- WG server_provided_public_key <WG_ID> <Base64Key>
- WG info|information <WG_ID>
- WG up|down <WG_ID>
- """
+ """Register WireGuard related commands, grouped under 'WG'."""
ap = subparsers.add_parser("WG")
ap.add_argument(
"wg_verb",
def register_option_commands(subparsers):
- """Register option commands.
+ """Register global option commands (non-subu-specific for now):
- Current surface:
- option Unix <mode> # e.g. dry|run
+ option set|get|list ...
"""
ap = subparsers.add_parser("option")
- ap.add_argument("area", choices=["Unix"])
- ap.add_argument("mode")
+ ap.add_argument("action", choices=["set", "get", "list"])
+ ap.add_argument("subu_id")
+ ap.add_argument("name", nargs="?")
+ ap.add_argument("value", nargs="?")
def register_exec_commands(subparsers):
"""
ap = subparsers.add_parser("exec")
ap.add_argument("subu_id")
- # Use a dedicated "--" argument so that:
- # CLI.py exec subu_7 -- curl -4v https://ifconfig.me
- # works as before.
ap.add_argument("--", dest="cmd", nargs=argparse.REMAINDER, default=[])
+def register_lo_commands(subparsers):
+  """Register lo command:
+
+    lo up|down <Subu_ID>
+  """
+  ap = subparsers.add_parser("lo")
+  # Positional order mirrors the grammar: state first, then the subu id.
+  ap.add_argument("state", choices=["up", "down"])
+  ap.add_argument("subu_id")
+
+
def build_arg_parser(program_name: str) -> argparse.ArgumentParser:
"""Build the top level argument parser for the subu manager."""
parser = argparse.ArgumentParser(prog=program_name, add_help=False)
register_network_commands(subparsers)
register_option_commands(subparsers)
register_exec_commands(subparsers)
+ register_device_commands(subparsers)
+ register_lo_commands(subparsers)
return parser
def _collect_parse_errors(ns, program_name: str) -> list[str]:
- """Check for semantic argument problems and collect error strings.
-
- We keep this lightweight and focused on things we can know without
- touching the filesystem or the database.
- """
+ """Check for semantic argument problems and collect error strings."""
errors: list[str] = []
+ if ns.verb == "device":
+ if ns.action == "scan":
+ return dispatch.device_scan(ns.base_dir)
+
if ns.verb == "subu":
sv = getattr(ns, "subu_verb", None)
- if sv == "make":
+ if sv in ("make", "capture"):
if not ns.path or len(ns.path) < 2:
errors.append(
- "subu make requires at least <masu> and one <subu> component"
+ f"subu {sv} requires at least <masu> and one <subu> component"
)
elif sv in ("remove", "info"):
- # Either ID or path. For path we need at least 2 tokens.
if ns.target.startswith("subu_"):
- if ns.verb == "subu" and sv in ("remove", "info") and ns.rest:
+ if ns.rest:
errors.append(
f"{program_name} subu {sv} with an ID form must not have extra path tokens"
)
errors.append(
f"{program_name} subu {sv} <masu> <subu> [<subu> ...] requires at least two tokens"
)
+ elif sv == "option":
+ # For incommon, same ID vs path rules as info/remove.
+ if ns.opt_name == "incommon":
+ if ns.target.startswith("subu_"):
+ if ns.rest:
+ errors.append(
+ f"{program_name} subu option {ns.opt_action} incommon with an ID form "
+ "must not have extra path tokens"
+ )
+ else:
+ if len([ns.target] + list(ns.rest)) < 2:
+ errors.append(
+ f"{program_name} subu option {ns.opt_action} incommon "
+ "<masu> <subu> [<subu> ...] requires at least two tokens"
+ )
return errors
if argv is None:
argv = sys.argv[1:]
- # Determine the program name for text/help:
- #
- # 1. If SUBU_PROGNAME is set in the environment, use that.
- # 2. Otherwise, derive it from sys.argv[0] (basename).
prog_override = os.environ.get("SUBU_PROGNAME")
if prog_override:
program_name = prog_override
text = make_text(program_name)
- # No arguments is the same as "help".
+ # No arguments is the same as "usage".
if not argv:
print(text.usage(), end="")
return 0
- # Simple verbs that bypass argparse so they always work.
simple = {
"help": text.help,
"--help": text.help,
print(text.version(), end="")
return 0
- # Collect semantic parse errors before we call dispatch.
errors = _collect_parse_errors(ns, program_name)
if errors:
for msg in errors:
sv = ns.subu_verb
if sv == "make":
return dispatch.subu_make(ns.path)
+ if sv == "capture":
+ return dispatch.subu_capture(ns.path)
if sv == "list":
return dispatch.subu_list()
if sv == "info":
return dispatch.subu_info(ns.target, ns.rest)
if sv == "remove":
return dispatch.subu_remove(ns.target, ns.rest)
+ if sv == "option":
+ # For now only 'incommon' is supported.
+ return dispatch.subu_option_incommon(ns.opt_action, ns.target, ns.rest)
+
+ if ns.verb == "lo":
+ return dispatch.lo_toggle(ns.subu_id, ns.state)
if ns.verb == "WG":
v = ns.wg_verb
return dispatch.network_toggle(ns.subu_id, ns.state)
if ns.verb == "option":
- if ns.area == "Unix":
- return dispatch.option_unix(ns.mode)
+ # global options still placeholder
+ print("option: not yet implemented", file=sys.stderr)
+ return 1
if ns.verb == "exec":
if not ns.cmd:
return 2
return dispatch.exec(ns.subu_id, ns.cmd)
- # If we reach here, the verb was not recognised.
print(text.usage(), end="")
return 2
# dispatch.py
# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-import os, sys, sqlite3
+# NOTE(review): sqlite3 must stay imported (sqlite3.Row, sqlite3.IntegrityError
+# and annotations below still use it); stat is needed by the chmod helpers;
+# get_option is called by the incommon logic.
+import os, sys, sqlite3, stat
import env
from domain import subu as subu_domain
+from domain import device as device_domain
from infrastructure.db import open_db, ensure_schema
-from infrastructure.options_store import set_option
+from infrastructure.options_store import get_option, set_option
+from infrastructure.unix import (
+  ensure_unix_group,
+  ensure_unix_user,
+  ensure_user_in_group,
+  remove_user_from_group,
+  user_exists,
+)
+
+
+
+# lo_toggle, WG, attach, network, exec stubs remain below.
+
def _require_root(action: str) -> bool:
- """Return True if running as root, else print error and return False."""
try:
euid = os.geteuid()
except AttributeError:
- # Non-POSIX; be permissive.
return True
if euid != 0:
print(f"{action}: must be run as root", file=sys.stderr)
def _open_existing_db() -> sqlite3.Connection | None:
- """Open the existing manager DB or print an error and return None.
-
- This does *not* create the DB; callers should ensure that
- 'db load schema' has been run first.
- """
path = _db_path()
if not os.path.exists(path):
print(
print(f"subu: unable to open database '{path}': {e}", file=sys.stderr)
return None
- # Use row objects so we can access columns by name.
conn.row_factory = sqlite3.Row
return conn
def db_load_schema() -> int:
- """Handle: CLI.py db load schema
-
- Ensure the DB directory exists, open the DB, and apply schema.sql.
- """
if not _require_root("db load schema"):
return 1
return 0
-def subu_make(path_tokens: list[str]) -> int:
- """Handle: CLI.py subu make <masu> <subu> [<subu> ...]
+def device_scan(base_dir: str ="/mnt") -> int:
+ """
+ Handle:
+
+ CLI.py device scan [--base-dir /mnt]
+
+ Behavior:
+ * Open the subu SQLite database.
+ * Scan all directories under base_dir that contain 'user_data'.
+ * For each such device:
+ - Upsert a row in 'device'.
+ - Reconcile all subu under user_data into 'subu', marking
+ them as online and associating them with the device.
+ - Mark any previously-known subu on that device that are not
+ seen in this scan as offline.
+
+ This function does NOT perform any cryptsetup, mount, or bindfs work.
+ It assumes devices are already mounted at /mnt/<mapname>.
+ """
+ try:
+ conn = open_db()
+ except Exception as e:
+ print(
+ f"subu: cannot open database at '{env.db_path()}': {e}",
+ file =sys.stderr,
+ )
+ return 1
- path_tokens is:
- [masu, subu, subu, ...]
+ try:
+ count = device_domain.scan_and_reconcile(conn, base_dir)
+ if count == 0:
+ print(f"no user_data devices found under {base_dir}")
+ else:
+ print(f"scanned {count} device(s) under {base_dir}")
+ return 0
+ finally:
+ conn.close()
- Example:
- CLI.py subu make Thomas developer
- CLI.py subu make Thomas developer bolt
- """
+
+def _insert_subu_row(conn, owner: str, subu_path: list[str], username: str) -> int | None:
+  """Insert a row into subu table and return its id.
+
+  Returns None (after printing to stderr) on IntegrityError or any other
+  database failure; callers treat None as "stop here".
+  """
+  leaf_name = subu_path[-1]
+  full_unix_name = username
+  path_str = " ".join([owner] + subu_path)
+  # netns name mirrors the unix name — simple deterministic choice.
+  netns_name = full_unix_name
+
+  from datetime import datetime, timezone
+
+  now = datetime.now(timezone.utc).isoformat()
+
+  try:
+    cur = conn.execute(
+      """INSERT INTO subu
+         (owner, name, full_unix_name, path, netns_name, wg_id, created_at, updated_at)
+         VALUES (?, ?, ?, ?, ?, NULL, ?, ?)""",
+      (owner, leaf_name, full_unix_name, path_str, netns_name, now, now),
+    )
+    conn.commit()
+    return cur.lastrowid
+  except sqlite3.IntegrityError as e:
+    print(
+      f"subu: database already has an entry for '{full_unix_name}': {e}",
+      file=sys.stderr,
+    )
+    return None
+  except Exception as e:
+    print(f"subu: error recording subu in database: {e}", file=sys.stderr)
+    return None
+
+
+def _maybe_add_to_incommon(conn, owner: str, new_username: str) -> None:
+  """If owner has an incommon subu configured, add new_username to that group.
+
+  Best-effort: malformed or stale option values produce warnings on stderr
+  and the function returns without raising.
+  """
+  key = f"incommon.{owner}"
+  spec = get_option(key, None)
+  if not spec:
+    return
+  # Expected value format: 'subu_<numeric id>'.
+  if not isinstance(spec, str) or not spec.startswith("subu_"):
+    print(
+      f"subu: warning: option {key} has unexpected value '{spec}', "
+      "expected 'subu_<id>'",
+      file=sys.stderr,
+    )
+    return
+  try:
+    subu_numeric_id = int(spec.split("_", 1)[1])
+  except ValueError:
+    print(
+      f"subu: warning: option {key} has invalid Subu_ID '{spec}'",
+      file=sys.stderr,
+    )
+    return
+
+  row = conn.execute(
+    "SELECT full_unix_name FROM subu WHERE id = ? AND owner = ?",
+    (subu_numeric_id, owner),
+  ).fetchone()
+  if row is None:
+    print(
+      f"subu: warning: option {key} refers to missing subu id {subu_numeric_id}",
+      file=sys.stderr,
+    )
+    return
+
+  incommon_unix = row["full_unix_name"]
+  ensure_user_in_group(new_username, incommon_unix)
+
+
+def subu_make(path_tokens: list[str]) -> int:
if not path_tokens or len(path_tokens) < 2:
print(
"subu: make requires at least <masu> and one <subu> component",
masu = path_tokens[0]
subu_path = path_tokens[1:]
- # 1) Create Unix user + groups.
try:
username = subu_domain.make_subu(masu, subu_path)
except SystemExit as e:
- # Domain layer uses SystemExit for validation errors.
print(f"subu: {e}", file=sys.stderr)
return 2
except Exception as e:
print(f"subu: error creating Unix user for {path_tokens}: {e}", file=sys.stderr)
return 1
- # 2) Record in SQLite.
conn = _open_existing_db()
if conn is None:
- # Unix side succeeded but DB is missing; report and stop.
return 1
- owner = masu
- leaf_name = subu_path[-1]
- full_unix_name = username
- path_str = " ".join([masu] + subu_path)
- netns_name = full_unix_name # simple deterministic choice for now
+ subu_id = _insert_subu_row(conn, masu, subu_path, username)
+ if subu_id is None:
+ conn.close()
+ return 1
- from datetime import datetime, timezone
+ # If this owner has an incommon subu, join that group.
+ _maybe_add_to_incommon(conn, masu, username)
- now = datetime.now(timezone.utc).isoformat()
+ conn.close()
+ print(f"subu_{subu_id}")
+ return 0
- try:
- cur = conn.execute(
- """INSERT INTO subu
- (owner, name, full_unix_name, path, netns_name, wg_id, created_at, updated_at)
- VALUES (?, ?, ?, ?, ?, NULL, ?, ?)""",
- (owner, leaf_name, full_unix_name, path_str, netns_name, now, now),
+
+def subu_capture(path_tokens: list[str]) -> int:
+  """Handle: subu capture <masu> <subu> [<subu> ...]
+
+  Capture an existing Unix user into the database and fix its groups.
+  """
+  if not path_tokens or len(path_tokens) < 2:
+    print(
+      "subu: capture requires at least <masu> and one <subu> component",
+      file=sys.stderr,
+    )
+    return 2
+
+  if not _require_root("subu capture"):
+    return 1
+
+  masu = path_tokens[0]
+  subu_path = path_tokens[1:]
+
+  # Compute expected Unix username.
+  try:
+    username = subu_domain.subu_username(masu, subu_path)
+  except SystemExit as e:
+    print(f"subu: {e}", file=sys.stderr)
+    return 2
+
+  if not user_exists(username):
+    print(f"subu: capture: Unix user '{username}' does not exist", file=sys.stderr)
+    return 1
+
+  # Ensure the primary group exists (legacy systems should already have it).
+  ensure_unix_group(username)
+
+  # Ensure membership in ancestor groups for traversal.
+  # NOTE(review): _ancestor_group_names is a private domain helper; consider
+  # exposing a public wrapper so dispatch does not reach into domain internals.
+  ancestor_groups = subu_domain._ancestor_group_names(masu, subu_path)
+  for gname in ancestor_groups:
+    ensure_user_in_group(username, gname)
+
+  conn = _open_existing_db()
+  if conn is None:
+    return 1
+
+  subu_id = _insert_subu_row(conn, masu, subu_path, username)
+  if subu_id is None:
    conn.close()
    return 1
-  conn.close()
+  # Honor any incommon config for this owner.
+  _maybe_add_to_incommon(conn, masu, username)
+  conn.close()
  print(f"subu_{subu_id}")
  return 0
def _resolve_subu(conn: sqlite3.Connection, target: str, rest: list[str]) -> sqlite3.Row | None:
- """Resolve a subu either by ID (subu_7) or by path.
-
- ID form:
- target = 'subu_7', rest = []
-
- Path form:
- target = masu, rest = [subu, subu, ...]
- """
- # ID form: subu_7
+ """Resolve a subu either by ID (subu_7) or by path."""
if target.startswith("subu_") and not rest:
try:
subu_numeric_id = int(target.split("_", 1)[1])
print(f"subu: no such subu with id {subu_numeric_id}", file=sys.stderr)
return row
- # Path form
path_tokens = [target] + list(rest)
if len(path_tokens) < 2:
print(
def subu_list() -> int:
- """Handle: CLI.py subu list"""
conn = _open_existing_db()
if conn is None:
return 1
cur = conn.execute(
"SELECT id, owner, path, full_unix_name, netns_name, wg_id FROM subu ORDER BY id"
)
-
rows = cur.fetchall()
conn.close()
def subu_info(target: str, rest: list[str]) -> int:
- """Handle: CLI.py subu info <Subu_ID>|<masu> <subu> [<subu> ...]
-
- Examples:
- CLI.py subu info subu_3
- CLI.py subu info Thomas developer bolt
- """
conn = _open_existing_db()
if conn is None:
return 1
def subu_remove(target: str, rest: list[str]) -> int:
- """Handle: CLI.py subu remove <Subu_ID>|<masu> <subu> [<subu> ...]
-
- This removes both:
- - the Unix user/group associated with the subu, and
- - the corresponding row from the database.
- """
if not _require_root("subu remove"):
return 1
return 1
subu_id = row["id"]
- owner = row["owner"]
path_str = row["path"]
path_tokens = path_str.split(" ")
- if not path_tokens or len(path_tokens) < 2:
+ if len(path_tokens) < 2:
print(f"subu: stored path is invalid for id {subu_id}: '{path_str}'", file=sys.stderr)
conn.close()
return 1
masu = path_tokens[0]
subu_path = path_tokens[1:]
- # 1) Remove Unix user + group.
try:
username = subu_domain.remove_subu(masu, subu_path)
except SystemExit as e:
conn.close()
return 1
- # 2) Remove from DB.
try:
conn.execute("DELETE FROM subu WHERE id = ?", (subu_id,))
conn.commit()
return 1
conn.close()
-
print(f"removed subu_{subu_id} {username}")
return 0
-# Placeholder stubs for existing option / WG / network / exec wiring.
-# These keep the module importable while we focus on subu + db.
+def _subu_home_path(owner: str, path_str: str) -> str:
+ """Compute subu home dir from owner and path string."""
+ tokens = path_str.split(" ")
+ if not tokens or tokens[0] != owner:
+ return ""
+ subu_tokens = tokens[1:]
+ path = os.path.join("/home", owner)
+ for t in subu_tokens:
+ path = os.path.join(path, "subu_data", t)
+ return path
+
+
+def _chmod_incommon(home: str) -> None:
+  """Open `home` to its group (r+x) and strip all 'other' permission bits.
+
+  A missing directory is reported as a warning, not an error.
+  """
+  try:
+    st = os.stat(home)
+  except FileNotFoundError:
+    print(f"subu: warning: incommon home '{home}' does not exist", file=sys.stderr)
+    return
+
+  mode = st.st_mode
+  # group gets read + traverse so members can list and enter the directory
+  mode |= (stat.S_IRGRP | stat.S_IXGRP)
+  # 'other' gets nothing
+  mode &= ~(stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
+  os.chmod(home, mode)
+
+
+def _chmod_private(home: str) -> None:
+  """Strip all group permission bits from `home`.
+
+  Used when clearing incommon; 'other' bits are left untouched here.
+  A missing directory is reported as a warning, not an error.
+  """
+  try:
+    st = os.stat(home)
+  except FileNotFoundError:
+    print(f"subu: warning: home '{home}' does not exist for clear incommon", file=sys.stderr)
+    return
+
+  mode = st.st_mode
+  mode &= ~(stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)
+  os.chmod(home, mode)
+
+
+def subu_option_incommon(action: str, target: str, rest: list[str]) -> int:
+  """Handle:
+
+    subu option set incommon <Subu_ID>|<masu> <subu> [<subu> ...]
+    subu option clear incommon <Subu_ID>|<masu> <subu> [<subu> ...]
+
+  'set' records this subu as the owner's incommon subu, adds every other
+  subu of that owner to its Unix group, and opens group r-x on its home.
+  'clear' reverses that, but only when the recorded value matches.
+  """
+  if not _require_root(f"subu option {action} incommon"):
+    return 1
+
+  conn = _open_existing_db()
+  if conn is None:
+    return 1
+
+  row = _resolve_subu(conn, target, rest)
+  if row is None:
+    conn.close()
+    return 1
+
+  subu_id = row["id"]
+  owner = row["owner"]
+  full_unix_name = row["full_unix_name"]
+  path_str = row["path"]
+
+  # Per-owner option key; value is the Subu_ID spec 'subu_<id>'.
+  key = f"incommon.{owner}"
+  spec = f"subu_{subu_id}"
+
+  if action == "set":
+    # Record mapping.
+    set_option(key, spec)
+
+    # Make all subu of this owner members of this group.
+    cur = conn.execute(
+      "SELECT full_unix_name FROM subu WHERE owner = ?",
+      (owner,),
+    )
+    rows = cur.fetchall()
+    for r in rows:
+      uname = r["full_unix_name"]
+      if uname == full_unix_name:
+        continue
+      ensure_user_in_group(uname, full_unix_name)
+
+    # Adjust directory permissions on incommon home.
+    home = _subu_home_path(owner, path_str)
+    if home:
+      _chmod_incommon(home)
+
+    conn.close()
+    print(f"incommon for {owner} set to subu_{subu_id}")
+    return 0
+
+  # clear
+  # Refuse to clear if a different subu is currently recorded as incommon.
+  current = get_option(key, "")
+  if current and current != spec:
+    print(
+      f"subu: incommon for owner '{owner}' is currently {current}, not {spec}",
+      file=sys.stderr,
+    )
+    conn.close()
+    return 1
+
+  # Clear mapping.
+  set_option(key, "")
+
+  # Remove other subu from this group.
+  cur = conn.execute(
+    "SELECT full_unix_name FROM subu WHERE owner = ?",
+    (owner,),
+  )
+  rows = cur.fetchall()
+  for r in rows:
+    uname = r["full_unix_name"]
+    if uname == full_unix_name:
+      continue
+    remove_user_from_group(uname, full_unix_name)
+
+  home = _subu_home_path(owner, path_str)
+  if home:
+    _chmod_private(home)
+
+  conn.close()
+  print(f"incommon for {owner} cleared from subu_{subu_id}")
+  return 0
+
+
+# --- existing stubs (unchanged) -------------------------------------------
def wg_global(arg1: str | None) -> int:
+  # Stub: prints a notice and returns nonzero until WG support lands.
  print("WG global: not yet implemented", file=sys.stderr)
  return 1
-def option_unix(mode: str) -> int:
- # example: store a Unix handling mode into options_store
- set_option("Unix.mode", mode)
- print(f"Unix mode set to {mode}")
- return 0
+def lo_toggle(subu_id: str, state: str) -> int:
+  """Stub for 'lo up|down <Subu_ID>'; always reports unimplemented."""
+  print("lo up/down: not yet implemented", file=sys.stderr)
+  return 1
def exec(subu_id: str, cmd_argv: list[str]) -> int:
--- /dev/null
+# domain/device.py
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+
+"""
+Device-aware reconciliation of subu state.
+
+This module assumes:
+ * Devices with user data are mounted as: /mnt/<mapname>
+ * On each device, user data lives under: /mnt/<mapname>/user_data/<masu>
+ * Subu home directories follow the pattern:
+
+ /mnt/<mapname>/user_data/<masu>/subu_data/<subu0>/subu_data/<subu1>/...
+
+ i.e., each subu directory may contain a 'subu_data' directory for children.
+
+Given an open SQLite connection, scan_and_reconcile() will:
+
+ * Discover all devices under a base directory (default: /mnt)
+ * For each device that has 'user_data':
+ - Upsert a row in the 'device' table.
+ - Discover all subu paths for all masus on that device.
+ - Upsert/refresh rows in 'subu' with device_id + is_online=1.
+ - Mark any previously-known subu on that device that are not seen
+ in the current scan as is_online=0.
+"""
+
+import os
+from datetime import datetime
+from pathlib import Path
+
+from domain.subu import subu_username
+
+
+def _utc_now() -> str:
+ """
+ Return a UTC timestamp string suitable for created_at/updated_at/last_seen.
+ Example: '2025-11-11T05:30:12Z'
+ """
+ return datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+
+
+def _walk_subu_paths(subu_root: Path):
+ """
+ Yield all subu paths under a root 'subu_data' directory.
+
+ Layout assumption:
+
+ subu_root/
+ S0/
+ ...files...
+ subu_data/
+ S1/
+ ...
+ subu_data/
+ S2/
+ ...
+
+ For each logical path:
+ ['S0'] (top-level)
+ ['S0','S1'] (child)
+ ['S0','S1','S2'] (grand-child)
+ ...
+
+ we yield the list of path components.
+ """
+ stack: list[tuple[Path, list[str]]] = [(subu_root, [])]
+
+ while stack:
+ current_root, prefix = stack.pop()
+ try:
+ entries = sorted(current_root.iterdir(), key =lambda p: p.name)
+ except FileNotFoundError:
+ continue
+
+ for entry in entries:
+ if not entry.is_dir():
+ continue
+ name = entry.name
+ path_components = prefix + [name]
+ yield path_components
+
+ child_subu_data = entry / "subu_data"
+ if child_subu_data.is_dir():
+ stack.append((child_subu_data, path_components))
+
+
+def _upsert_device(
+ conn,
+ mapname: str,
+ mount_point: str,
+ kind: str ="external",
+) -> int:
+ """
+ Ensure a row exists for this device and return its id.
+
+ We do NOT try to discover fs_uuid/luks_uuid here; those can be filled
+ in later if desired.
+ """
+ now = _utc_now()
+
+ cur = conn.execute(
+ "SELECT id FROM device WHERE mapname = ?",
+ (mapname,),
+ )
+ row = cur.fetchone()
+
+ if row:
+ device_id = row["id"]
+ conn.execute(
+ """
+ UPDATE device
+ SET mount_point = ?,
+ kind = ?,
+ state = 'online',
+ last_seen = ?
+ WHERE id = ?
+ """,
+ (mount_point, kind, now, device_id),
+ )
+ else:
+ cur = conn.execute(
+ """
+ INSERT INTO device (mapname, mount_point, kind, state, last_seen)
+ VALUES (?, ?, ?, 'online', ?)
+ """,
+ (mapname, mount_point, kind, now),
+ )
+ device_id = cur.lastrowid
+
+ return int(device_id)
+
+
+def _ensure_subu_row(
+  conn,
+  device_id: int,
+  owner: str,
+  subu_path_components: list[str],
+  full_path_str: str,
+  now: str,
+):
+  """
+  Upsert a row in 'subu' for (owner, subu_path_components) on device_id.
+
+  full_path_str is the human-readable path, e.g. 'Thomas local' or
+  'Thomas developer bolt'.
+  """
+  if not subu_path_components:
+    return
+
+  leaf_name = subu_path_components[-1]
+  full_unix_name = subu_username(owner, subu_path_components)
+
+  # For now, we simply reuse full_unix_name as netns_name.
+  netns_name = full_unix_name
+
+  # See if a row already exists for this owner + path.
+  # (owner, path) is the identity key here; full_unix_name is not consulted.
+  cur = conn.execute(
+    "SELECT id FROM subu WHERE owner = ? AND path = ?",
+    (owner, full_path_str),
+  )
+  row = cur.fetchone()
+
+  if row:
+    subu_id = row["id"]
+    conn.execute(
+      """
+      UPDATE subu
+      SET device_id = ?,
+          is_online = 1,
+          updated_at = ?
+      WHERE id = ?
+      """,
+      (device_id, now, subu_id),
+    )
+    return
+
+  # Insert new row
+  conn.execute(
+    """
+    INSERT INTO subu (
+      owner,
+      name,
+      full_unix_name,
+      path,
+      netns_name,
+      wg_id,
+      device_id,
+      is_online,
+      created_at,
+      updated_at
+    )
+    VALUES (?, ?, ?, ?, ?, NULL, ?, 1, ?, ?)
+    """,
+    (
+      owner,
+      leaf_name,
+      full_unix_name,
+      full_path_str,
+      netns_name,
+      device_id,
+      now,
+      now,
+    ),
+  )
+
+
+def _reconcile_device_for_mount(conn, device_id: int, user_data_dir: Path):
+ """
+ Reconcile all subu on a particular device.
+
+ user_data_dir is a path like:
+
+ /mnt/Eagle/user_data
+
+ Under which we expect:
+
+ /mnt/Eagle/user_data/<masu>/subu_data/...
+ """
+ now = _utc_now()
+ discovered: set[tuple[str, str]] = set()
+
+ try:
+ owners = sorted(user_data_dir.iterdir(), key =lambda p: p.name)
+ except FileNotFoundError:
+ return
+
+ for owner_entry in owners:
+ if not owner_entry.is_dir():
+ continue
+
+ owner = owner_entry.name
+ subu_root = owner_entry / "subu_data"
+ if not subu_root.is_dir():
+ # masu with no subu_data; skip
+ continue
+
+ for subu_components in _walk_subu_paths(subu_root):
+ # Full logical path is: [owner] + subu_components
+ path_tokens = [owner] + subu_components
+ path_str = " ".join(path_tokens)
+ discovered.add((owner, path_str))
+
+ _ensure_subu_row(
+ conn =conn,
+ device_id =device_id,
+ owner =owner,
+ subu_path_components =subu_components,
+ full_path_str =path_str,
+ now =now,
+ )
+
+ # Mark any existing subu on this device that we did NOT see as offline.
+ cur = conn.execute(
+ "SELECT id, owner, path FROM subu WHERE device_id = ?",
+ (device_id,),
+ )
+ existing = cur.fetchall()
+ for row in existing:
+ key = (row["owner"], row["path"])
+ if key in discovered:
+ continue
+ conn.execute(
+ """
+ UPDATE subu
+ SET is_online = 0,
+ updated_at = ?
+ WHERE id = ?
+ """,
+ (now, row["id"]),
+ )
+
+
+def scan_and_reconcile(conn, base_dir: str ="/mnt") -> int:
+ """
+ Scan all mounted devices under base_dir for 'user_data' trees and
+ reconcile them into the database.
+
+ For each directory 'base_dir/<mapname>':
+
+ * If it contains 'user_data', it is treated as a device.
+ * A 'device' row is upserted (mapname = basename).
+ * All subu under the corresponding user_data tree are reconciled.
+
+ Returns:
+ Number of devices that were processed.
+ """
+ root = Path(base_dir)
+ if not root.is_dir():
+ return 0
+
+ processed = 0
+
+ for entry in sorted(root.iterdir(), key =lambda p: p.name):
+ if not entry.is_dir():
+ continue
+
+ mapname = entry.name
+ user_data_dir = entry / "user_data"
+ if not user_data_dir.is_dir():
+ continue
+
+ mount_point = str(entry)
+ device_id = _upsert_device(conn, mapname, mount_point)
+ _reconcile_device_for_mount(conn, device_id, user_data_dir)
+ processed += 1
+
+ conn.commit()
+ return processed
remove_unix_user_and_group,
user_exists,
)
+from typing import Iterable
+import sqlite3, datetime
+def _now(): return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+
+def subu_username(owner: str, parts: list[str]) -> str:
+ return "_".join([owner] + parts)
+
+def ensure_chain(conn, owner: str, parts: list[str], device_id: int|None, online: bool):
+  """
+  Ensure that owner/parts[...] exists as a chain; return leaf row (dict).
+  """
+  # Row access by name below needs Row; note this mutates the connection's
+  # row_factory for subsequent callers too.
+  conn.row_factory = sqlite3.Row
+  parent_id = None
+  chain: list[str] = []
+  now = _now()
+  for seg in parts:
+    # 'IS ?' (not '= ?') so a NULL parent_id (top level) matches.
+    row = conn.execute(
+      "SELECT * FROM subu_node WHERE owner=? AND name=? AND parent_id IS ?",
+      (owner, seg, parent_id)
+    ).fetchone()
+    if row:
+      parent_id = row["id"]
+      chain.append(seg)
+      continue
+    chain.append(seg)
+    full_path = " ".join([owner] + chain)
+    full_unix = subu_username(owner, chain)
+    netns = full_unix
+    conn.execute(
+      """INSERT INTO subu_node(owner,name,parent_id,full_unix_name,full_path,netns_name,
+                     device_id,is_online,created_at,updated_at)
+         VALUES(?,?,?,?,?,?,?, ?,?,?)""",
+      (owner, seg, parent_id, full_unix, full_path, netns,
+       device_id, 1 if online else 0, now, now)
+    )
+    parent_id = conn.execute("SELECT last_insert_rowid() id").fetchone()["id"]
+  # NOTE(review): assumes parts is non-empty; with empty parts parent_id is
+  # None and dict(leaf) raises — confirm callers.
+  leaf = conn.execute("SELECT * FROM subu_node WHERE id=?", (parent_id,)).fetchone()
+  return dict(leaf)
+
+def find_by_path(conn, owner: str, parts: list[str]):
+  """Resolve owner/parts to its subu_node row (as a dict), or None when
+  any segment of the chain is missing.
+  """
+  # NOTE(review): callers must pass a non-empty parts list; with an empty
+  # list 'row' is never bound and dict(row) raises UnboundLocalError.
+  conn.row_factory = sqlite3.Row
+  parent_id = None
+  for seg in parts:
+    # 'IS ?' so that a NULL parent_id (top level) matches.
+    row = conn.execute(
+      "SELECT * FROM subu_node WHERE owner=? AND name=? AND parent_id IS ?",
+      (owner, seg, parent_id)
+    ).fetchone()
+    if not row:
+      return None
+    parent_id = row["id"]
+  return dict(row)
+
+def list_children(conn, node_id: int|None, owner: str):
+  """
+  node_id=None lists top-level subu of owner; otherwise children of node_id.
+  Rows are returned as dicts, ordered by name.
+  """
+  conn.row_factory = sqlite3.Row
+  if node_id is None:
+    cur = conn.execute("SELECT * FROM subu_node WHERE owner=? AND parent_id IS NULL ORDER BY name", (owner,))
+  else:
+    cur = conn.execute("SELECT * FROM subu_node WHERE owner=? AND parent_id=? ORDER BY name", (owner, node_id))
+  return [dict(r) for r in cur.fetchall()]
def _validate_token(label: str, token: str) -> str:
"""
return token_stripped
-def subu_username(masu: str, path_components: list[str]) -> str:
- """
- Build the Unix username for a subu.
-
- Examples:
- masu = "Thomas", path = ["S0"] -> "Thomas_S0"
- masu = "Thomas", path = ["S0","S1"] -> "Thomas_S0_S1"
-
- The path is:
- masu subu subu ...
- """
- masu_s = _validate_token("masu", masu).replace(" ", "_")
- subu_parts: list[str] = []
- for s in path_components:
- subu_parts.append(_validate_token("subu", s).replace(" ", "_"))
- parts = [masu_s] + subu_parts
- return "_".join(parts)
-
-
def _parent_username(masu: str, path_components: list[str]) -> str | None:
"""
Return the Unix username of the parent subu, or None if this is top-level.
return username
-
def remove_subu(masu: str, path_components: list[str]) -> str:
"""
Remove the Unix user and group for this subu, if they exist.
"""
Software / CLI version.
"""
- return "0.3.4"
+ return "0.3.5"
def db_schema_version() -> str:
--- /dev/null
+# domain/device.py
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+
+"""
+Device-aware reconciliation of subu state.
+
+This module assumes:
+ * Devices with user data are mounted as: /mnt/<mapname>
+ * On each device, user data lives under: /mnt/<mapname>/user_data/<masu>
+ * Subu home directories follow the pattern:
+
+ /mnt/<mapname>/user_data/<masu>/subu_data/<subu0>/subu_data/<subu1>/...
+
+ i.e., each subu directory may contain a 'subu_data' directory for children.
+
+Given an open SQLite connection, scan_and_reconcile() will:
+
+ * Discover all devices under a base directory (default: /mnt)
+ * For each device that has 'user_data':
+ - Upsert a row in the 'device' table.
+ - Discover all subu paths for all masus on that device.
+ - Upsert/refresh rows in 'subu' with device_id + is_online=1.
+ - Mark any previously-known subu on that device that are not seen
+ in the current scan as is_online=0.
+"""
+
+import os
+from datetime import datetime
+from pathlib import Path
+
+from domain.subu import subu_username
+
+
+def _utc_now() -> str:
+ """
+ Return a UTC timestamp string suitable for created_at/updated_at/last_seen.
+ Example: '2025-11-11T05:30:12Z'
+ """
+ return datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+
+
+def _walk_subu_paths(subu_root: Path):
+ """
+ Yield all subu paths under a root 'subu_data' directory.
+
+ Layout assumption:
+
+ subu_root/
+ S0/
+ ...files...
+ subu_data/
+ S1/
+ ...
+ subu_data/
+ S2/
+ ...
+
+ For each logical path:
+ ['S0'] (top-level)
+ ['S0','S1'] (child)
+ ['S0','S1','S2'] (grand-child)
+ ...
+
+ we yield the list of path components.
+ """
+ stack: list[tuple[Path, list[str]]] = [(subu_root, [])]
+
+ while stack:
+ current_root, prefix = stack.pop()
+ try:
+ entries = sorted(current_root.iterdir(), key =lambda p: p.name)
+ except FileNotFoundError:
+ continue
+
+ for entry in entries:
+ if not entry.is_dir():
+ continue
+ name = entry.name
+ path_components = prefix + [name]
+ yield path_components
+
+ child_subu_data = entry / "subu_data"
+ if child_subu_data.is_dir():
+ stack.append((child_subu_data, path_components))
+
+
def _upsert_device(
  conn,
  mapname: str,
  mount_point: str,
  kind: str = "external",
) -> int:
  """
  Insert or refresh the 'device' row for mapname and return its id.

  The row is always marked state='online' with a fresh last_seen.
  fs_uuid/luks_uuid are not discovered here; they can be filled in later
  if desired.
  """
  now = _utc_now()
  existing = conn.execute(
    "SELECT id FROM device WHERE mapname = ?",
    (mapname,),
  ).fetchone()

  if existing is None:
    cur = conn.execute(
      """
      INSERT INTO device (mapname, mount_point, kind, state, last_seen)
      VALUES (?, ?, ?, 'online', ?)
      """,
      (mapname, mount_point, kind, now),
    )
    return int(cur.lastrowid)

  device_id = existing["id"]
  conn.execute(
    """
    UPDATE device
       SET mount_point = ?,
           kind = ?,
           state = 'online',
           last_seen = ?
     WHERE id = ?
    """,
    (mount_point, kind, now, device_id),
  )
  return int(device_id)
+
+
def _ensure_subu_row(
  conn,
  device_id: int,
  owner: str,
  subu_path_components: list[str],
  full_path_str: str,
  now: str,
):
  """
  Insert or refresh the 'subu' row for (owner, full_path_str) on
  device_id, marking it online.

  full_path_str is the human-readable path, e.g. 'Thomas local' or
  'Thomas developer bolt'.  An empty component list is a no-op.
  """
  if not subu_path_components:
    return

  unix_name = subu_username(owner, subu_path_components)

  existing = conn.execute(
    "SELECT id FROM subu WHERE owner = ? AND path = ?",
    (owner, full_path_str),
  ).fetchone()

  if existing is not None:
    conn.execute(
      """
      UPDATE subu
         SET device_id = ?,
             is_online = 1,
             updated_at = ?
       WHERE id = ?
      """,
      (device_id, now, existing["id"]),
    )
    return

  conn.execute(
    """
    INSERT INTO subu (
      owner, name, full_unix_name, path, netns_name,
      wg_id, device_id, is_online, created_at, updated_at
    )
    VALUES (?, ?, ?, ?, ?, NULL, ?, 1, ?, ?)
    """,
    (
      owner,
      subu_path_components[-1],
      unix_name,
      full_path_str,
      unix_name,  # netns_name simply reuses the unix name for now
      device_id,
      now,
      now,
    ),
  )
+
+
def _reconcile_device_for_mount(conn, device_id: int, user_data_dir: Path):
  """
  Reconcile every subu found under user_data_dir into the database for
  device_id, then mark any previously-known subu on this device that was
  not rediscovered as offline.

  user_data_dir looks like /mnt/Eagle/user_data, under which we expect
  /mnt/Eagle/user_data/<masu>/subu_data/...
  """
  now = _utc_now()
  seen: set[tuple[str, str]] = set()

  try:
    owner_dirs = sorted(user_data_dir.iterdir(), key=lambda p: p.name)
  except FileNotFoundError:
    return

  for owner_dir in owner_dirs:
    if not owner_dir.is_dir():
      continue
    owner = owner_dir.name
    subu_root = owner_dir / "subu_data"
    if not subu_root.is_dir():
      continue  # masu with no subu_data

    for components in _walk_subu_paths(subu_root):
      # Logical path is the owner followed by the subu chain.
      path_str = " ".join([owner] + components)
      seen.add((owner, path_str))
      _ensure_subu_row(
        conn=conn,
        device_id=device_id,
        owner=owner,
        subu_path_components=components,
        full_path_str=path_str,
        now=now,
      )

  # Anything recorded on this device but not rediscovered goes offline.
  for record in conn.execute(
    "SELECT id, owner, path FROM subu WHERE device_id = ?",
    (device_id,),
  ).fetchall():
    if (record["owner"], record["path"]) in seen:
      continue
    conn.execute(
      """
      UPDATE subu
         SET is_online = 0,
             updated_at = ?
       WHERE id = ?
      """,
      (now, record["id"]),
    )
+
+
def scan_and_reconcile(conn, base_dir: str = "/mnt") -> int:
  """
  Scan base_dir for mounted devices carrying a 'user_data' tree and
  reconcile each into the database.

  A directory base_dir/<mapname> containing 'user_data' is treated as a
  device: its 'device' row is upserted (mapname = basename) and its subu
  tree reconciled.

  Returns:
    Number of devices processed (0 when base_dir is missing).
  """
  root = Path(base_dir)
  if not root.is_dir():
    return 0

  processed = 0
  for candidate in sorted(root.iterdir(), key=lambda p: p.name):
    user_data = candidate / "user_data"
    if not (candidate.is_dir() and user_data.is_dir()):
      continue
    dev_id = _upsert_device(conn, candidate.name, str(candidate))
    _reconcile_device_for_mount(conn, dev_id, user_data)
    processed += 1

  conn.commit()
  return processed
-- schema.sql
-- Schema for subu manager, including device-aware subu tracking.

-- Devices that can hold one or more masu homes.
-- Each row represents a physical (or logical) storage volume
-- identified by a mapname like 'Eagle' and optionally by UUIDs.
CREATE TABLE device (
  id          INTEGER PRIMARY KEY,
  mapname     TEXT NOT NULL UNIQUE,              -- e.g. 'Eagle'
  fs_uuid     TEXT,                              -- filesystem UUID (optional)
  luks_uuid   TEXT,                              -- LUKS UUID (optional)
  mount_point TEXT NOT NULL,                     -- e.g. '/mnt/Eagle'
  kind        TEXT NOT NULL DEFAULT 'external',  -- 'local','external','encrypted',...
  state       TEXT NOT NULL DEFAULT 'offline',   -- 'online','offline','error'
  last_seen   TEXT NOT NULL                      -- ISO8601 UTC timestamp
);

-- Flat subu table.  NOTE(review): this change originally dropped 'subu',
-- but dispatch.py and domain/device.py still SELECT/INSERT against it
-- (including device_id and is_online), so the table is kept and extended
-- with the columns the reconciliation code writes.
CREATE TABLE subu (
  id             INTEGER PRIMARY KEY,
  owner          TEXT NOT NULL,               -- root user, e.g. 'Thomas'
  name           TEXT NOT NULL,               -- leaf, e.g. 'US', 'Rabbit'
  full_unix_name TEXT NOT NULL UNIQUE,        -- e.g. 'Thomas_US_Rabbit'
  path           TEXT NOT NULL,               -- e.g. 'Thomas US Rabbit'
  netns_name     TEXT NOT NULL,
  wg_id          INTEGER,                     -- nullable for now
  device_id      INTEGER,                     -- NULL=local
  is_online      INTEGER NOT NULL DEFAULT 1,
  created_at     TEXT NOT NULL,
  updated_at     TEXT NOT NULL,
  FOREIGN KEY(device_id) REFERENCES device(id)
);

-- Tree form: parents via parent_id; one row per node in the tree.
CREATE TABLE subu_node (
  id             INTEGER PRIMARY KEY,
  owner          TEXT NOT NULL,               -- masu
  name           TEXT NOT NULL,               -- this segment (e.g., developer, bolt)
  parent_id      INTEGER,                     -- NULL for top-level subu under owner
  full_unix_name TEXT NOT NULL UNIQUE,        -- e.g., Thomas_developer_bolt
  full_path      TEXT NOT NULL,               -- e.g., "Thomas developer bolt"
  netns_name     TEXT NOT NULL,               -- default = full_unix_name
  device_id      INTEGER,                     -- NULL=local
  is_online      INTEGER NOT NULL DEFAULT 1,
  created_at     TEXT NOT NULL,
  updated_at     TEXT NOT NULL,
  FOREIGN KEY(parent_id) REFERENCES subu_node(id),
  FOREIGN KEY(device_id) REFERENCES device(id),
  UNIQUE(owner, name, parent_id)              -- no duplicate siblings
);

CREATE INDEX idx_node_owner_parent ON subu_node(owner, parent_id);
CREATE INDEX idx_node_device ON subu_node(device_id);
+
"""
Ensure 'user' is a member of supplementary group 'group'.
- - Raises if either user or group does not exist.
- - No-op if the membership is already present.
+ No-op if already present.
"""
if not user_exists(user):
raise RuntimeError(f"ensure_user_in_group: user '{user}' does not exist")
if user in g.gr_mem:
return
- # usermod -a -G adds the group, preserving existing ones.
run(["usermod", "-a", "-G", group, user])
def remove_user_from_group(user: str, group: str):
  """
  Ensure 'user' is NOT a member of supplementary group 'group'.

  No-op if user or group is missing, or if user is not a member.
  """
  if not (user_exists(user) and group_exists(group)):
    return
  if user not in grp.getgrnam(group).gr_mem:
    return
  # gpasswd -d user group is the standard way on Debian/Ubuntu; failures
  # are treated as non-fatal.
  run(["gpasswd", "-d", user, group], check=False)
+
+
def remove_unix_user_and_group(name: str):
"""
Remove a Unix user and group that match this name, if they exist.
The user is removed first, then the group.
"""
if user_exists(name):
- # userdel returns non-zero if, for example, the user is logged in.
run(["userdel", name])
if group_exists(name):
run(["groupdel", name])
f"{p} — Subu manager (v{v})\n"
"\n"
"Usage:\n"
+
f" {p} # usage\n"
f" {p} help # detailed help\n"
f" {p} example # example workflow\n"
f" {p} version # print version\n"
"\n"
+
f" {p} db load schema\n"
"\n"
+
f" {p} subu make <masu> <subu> [<subu> ...]\n"
+ f" {p} subu capture <masu> <subu> [<subu> ...]\n"
f" {p} subu list\n"
f" {p} subu info subu_<id>\n"
f" {p} subu info <masu> <subu> [<subu> ...]\n"
f" {p} subu remove subu_<id>\n"
f" {p} subu remove <masu> <subu> [<subu> ...]\n"
+ f" {p} subu option set incommon subu_<id>\n"
+ f" {p} subu option set incommon <masu> <subu> [<subu> ...]\n"
+ f" {p} subu option clear incommon subu_<id>\n"
+ f" {p} subu option clear incommon <masu> <subu> [<subu> ...]\n"
"\n"
+
f" {p} lo up|down <Subu_ID>\n"
"\n"
+
f" {p} WG global <BaseCIDR>\n"
f" {p} WG make <host:port>\n"
f" {p} WG server_provided_public_key <WG_ID> <Base64Key>\n"
f" {p} WG up <WG_ID>\n"
f" {p} WG down <WG_ID>\n"
"\n"
+
f" {p} attach WG <Subu_ID> <WG_ID>\n"
f" {p} detach WG <Subu_ID>\n"
"\n"
+
f" {p} network up|down <Subu_ID>\n"
"\n"
+
f" {p} option set <Subu_ID> <name> <value>\n"
f" {p} option get <Subu_ID> <name>\n"
f" {p} option list <Subu_ID>\n"
"\n"
+
f" {p} exec <Subu_ID> -- <cmd> ...\n"
)
clean [DIR] Remove the contents of the release directories.
- For DIR=manager: clean $REPO_HOME/release/manager.
- For other DIR values: clean only that subdirectory under the release root.
+ list List $REPO_HOME/release as an indented tree: PERMS OWNER DATE NAME.
ls List $REPO_HOME/release as an indented tree: PERMS OWNER DATE NAME.
help Show this message.
dry write [DIR]
cmd_clean(args[0] if args else None)
elif cmd == "ls":
list_tree(rpath())
+ elif cmd == "list":
+ list_tree(rpath())
elif cmd == "help":
print(HELP)
elif cmd == "dry":
--- /dev/null
+#!/usr/bin/env python3
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+
+"""
+Remove all Python cache directories under the current working directory.
+
+Specifically:
+ * __pycache__
+ * __psycache__ (included in case of typo'd cache dirs)
+
+Usage:
+ python3 clean_pycache.py
+ ./clean_pycache.py
+"""
+
+import os, shutil, sys
+
+
def is_cache_dir(name: str) -> bool:
  """
  Return True if the directory name should be treated as a cache
  directory ('__pycache__', plus the typo'd '__psycache__').
  """
  return name == "__pycache__" or name == "__psycache__"
+
+
def remove_cache_dir(path: str) -> None:
  """
  Remove one cache directory and print what we did.

  A missing directory is ignored; permission and OS errors are reported
  to stderr as warnings rather than raised.
  """
  try:
    shutil.rmtree(path)
  except FileNotFoundError:
    # Someone else removed it; ignore.
    return
  except PermissionError as e:
    print(f"warning: cannot remove {path}: {e}", file=sys.stderr)
  except OSError as e:
    print(f"warning: error removing {path}: {e}", file=sys.stderr)
  else:
    print(f"removed: {path}")
+
+
def walk_and_clean(start: str) -> None:
  """
  Walk the tree from 'start' downward, removing every cache directory
  and pruning it from the walk so os.walk does not descend into it.
  """
  for root, dirs, _files in os.walk(start, topdown=True):
    caches = [d for d in dirs if is_cache_dir(d)]
    for name in caches:
      remove_cache_dir(os.path.join(root, name))
    # Prune removed names so the walk skips them.
    dirs[:] = [d for d in dirs if d not in caches]
+
+
def main(argv=None) -> int:
  """Clean cache dirs under the current working directory; return 0."""
  # Arguments are accepted for symmetry but currently unused.
  _ = sys.argv[1:] if argv is None else argv
  walk_and_clean(os.getcwd())
  return 0
+
+
+if __name__ == "__main__":
+ raise SystemExit(main())
"""Register subu related commands under 'subu':
subu make <masu> <subu> [<subu>]*
+ subu capture <masu> <subu> [<subu>]*
subu remove <Subu_ID> | <masu> <subu> [<subu>]*
subu list
subu info <Subu_ID> | <masu> <subu> [<subu>]*
+ subu option set|clear incommon <Subu_ID> | <masu> <subu> [<subu>]*
"""
ap_subu = subparsers.add_parser("subu")
subu_sub = ap_subu.add_subparsers(dest="subu_verb")
ap = subu_sub.add_parser("make")
ap.add_argument("path", nargs="+")
+ # capture: path[0] is masu, remaining elements are the subu chain
+ ap = subu_sub.add_parser("capture")
+ ap.add_argument("path", nargs="+")
+
# remove: either ID or path
ap = subu_sub.add_parser("remove")
ap.add_argument("target")
ap.add_argument("target")
ap.add_argument("rest", nargs="*")
+ # option incommon
+ ap = subu_sub.add_parser("option")
+ ap.add_argument("opt_action", choices=["set", "clear"])
+ ap.add_argument("opt_name", choices=["incommon"])
+ ap.add_argument("target")
+ ap.add_argument("rest", nargs="*")
-def register_wireguard_commands(subparsers):
- """Register WireGuard related commands, grouped under 'WG':
- WG global <BaseCIDR>
- WG make <host:port>
- WG server_provided_public_key <WG_ID> <Base64Key>
- WG info|information <WG_ID>
- WG up|down <WG_ID>
- """
+def register_wireguard_commands(subparsers):
+ """Register WireGuard related commands, grouped under 'WG'."""
ap = subparsers.add_parser("WG")
ap.add_argument(
"wg_verb",
def register_option_commands(subparsers):
  """Register global option commands (non-subu-specific for now):

  option set|get|list ...
  """
  parser = subparsers.add_parser("option")
  parser.add_argument("action", choices=["set", "get", "list"])
  parser.add_argument("subu_id")
  # 'name' and 'value' are optional so 'get' and 'list' forms parse too.
  for optional in ("name", "value"):
    parser.add_argument(optional, nargs="?")
def register_exec_commands(subparsers):
"""
ap = subparsers.add_parser("exec")
ap.add_argument("subu_id")
- # Use a dedicated "--" argument so that:
- # CLI.py exec subu_7 -- curl -4v https://ifconfig.me
- # works as before.
ap.add_argument("--", dest="cmd", nargs=argparse.REMAINDER, default=[])
def register_lo_commands(subparsers):
  """Register lo command:

  lo up|down <Subu_ID>
  """
  parser = subparsers.add_parser("lo")
  parser.add_argument("state", choices=["up", "down"])
  parser.add_argument("subu_id")
+
+
def build_arg_parser(program_name: str) -> argparse.ArgumentParser:
"""Build the top level argument parser for the subu manager."""
parser = argparse.ArgumentParser(prog=program_name, add_help=False)
register_db_commands(subparsers)
register_subu_commands(subparsers)
+ register_lo_commands(subparsers)
register_wireguard_commands(subparsers)
register_attach_commands(subparsers)
register_network_commands(subparsers)
def _collect_parse_errors(ns, program_name: str) -> list[str]:
- """Check for semantic argument problems and collect error strings.
-
- We keep this lightweight and focused on things we can know without
- touching the filesystem or the database.
- """
+ """Check for semantic argument problems and collect error strings."""
errors: list[str] = []
if ns.verb == "subu":
sv = getattr(ns, "subu_verb", None)
- if sv == "make":
+ if sv in ("make", "capture"):
if not ns.path or len(ns.path) < 2:
errors.append(
- "subu make requires at least <masu> and one <subu> component"
+ f"subu {sv} requires at least <masu> and one <subu> component"
)
elif sv in ("remove", "info"):
- # Either ID or path. For path we need at least 2 tokens.
if ns.target.startswith("subu_"):
- if ns.verb == "subu" and sv in ("remove", "info") and ns.rest:
+ if ns.rest:
errors.append(
f"{program_name} subu {sv} with an ID form must not have extra path tokens"
)
errors.append(
f"{program_name} subu {sv} <masu> <subu> [<subu> ...] requires at least two tokens"
)
+ elif sv == "option":
+ # For incommon, same ID vs path rules as info/remove.
+ if ns.opt_name == "incommon":
+ if ns.target.startswith("subu_"):
+ if ns.rest:
+ errors.append(
+ f"{program_name} subu option {ns.opt_action} incommon with an ID form "
+ "must not have extra path tokens"
+ )
+ else:
+ if len([ns.target] + list(ns.rest)) < 2:
+ errors.append(
+ f"{program_name} subu option {ns.opt_action} incommon "
+ "<masu> <subu> [<subu> ...] requires at least two tokens"
+ )
return errors
if argv is None:
argv = sys.argv[1:]
- # Determine the program name for text/help:
- #
- # 1. If SUBU_PROGNAME is set in the environment, use that.
- # 2. Otherwise, derive it from sys.argv[0] (basename).
prog_override = os.environ.get("SUBU_PROGNAME")
if prog_override:
program_name = prog_override
text = make_text(program_name)
- # No arguments is the same as "help".
+ # No arguments is the same as "usage".
if not argv:
print(text.usage(), end="")
return 0
- # Simple verbs that bypass argparse so they always work.
simple = {
"help": text.help,
"--help": text.help,
print(text.version(), end="")
return 0
- # Collect semantic parse errors before we call dispatch.
errors = _collect_parse_errors(ns, program_name)
if errors:
for msg in errors:
sv = ns.subu_verb
if sv == "make":
return dispatch.subu_make(ns.path)
+ if sv == "capture":
+ return dispatch.subu_capture(ns.path)
if sv == "list":
return dispatch.subu_list()
if sv == "info":
return dispatch.subu_info(ns.target, ns.rest)
if sv == "remove":
return dispatch.subu_remove(ns.target, ns.rest)
+ if sv == "option":
+ # For now only 'incommon' is supported.
+ return dispatch.subu_option_incommon(ns.opt_action, ns.target, ns.rest)
+
+ if ns.verb == "lo":
+ return dispatch.lo_toggle(ns.subu_id, ns.state)
if ns.verb == "WG":
v = ns.wg_verb
return dispatch.network_toggle(ns.subu_id, ns.state)
if ns.verb == "option":
- if ns.area == "Unix":
- return dispatch.option_unix(ns.mode)
+ # global options still placeholder
+ print("option: not yet implemented", file=sys.stderr)
+ return 1
if ns.verb == "exec":
if not ns.cmd:
return 2
return dispatch.exec(ns.subu_id, ns.cmd)
- # If we reach here, the verb was not recognised.
print(text.usage(), end="")
return 2
# dispatch.py
# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-import os, sys, sqlite3
+import os, sys, sqlite3, stat
import env
from domain import subu as subu_domain
from infrastructure.db import open_db, ensure_schema
-from infrastructure.options_store import set_option
+from infrastructure.options_store import set_option, get_option
+from infrastructure.unix import (
+ ensure_unix_group,
+ ensure_unix_user,
+ ensure_user_in_group,
+ remove_user_from_group,
+ user_exists,
+)
+# lo_toggle, WG, attach, network, exec stubs remain below.
def _require_root(action: str) -> bool:
- """Return True if running as root, else print error and return False."""
try:
euid = os.geteuid()
except AttributeError:
- # Non-POSIX; be permissive.
return True
if euid != 0:
print(f"{action}: must be run as root", file=sys.stderr)
def _open_existing_db() -> sqlite3.Connection | None:
- """Open the existing manager DB or print an error and return None.
-
- This does *not* create the DB; callers should ensure that
- 'db load schema' has been run first.
- """
path = _db_path()
if not os.path.exists(path):
print(
print(f"subu: unable to open database '{path}': {e}", file=sys.stderr)
return None
- # Use row objects so we can access columns by name.
conn.row_factory = sqlite3.Row
return conn
def db_load_schema() -> int:
- """Handle: CLI.py db load schema
-
- Ensure the DB directory exists, open the DB, and apply schema.sql.
- """
if not _require_root("db load schema"):
return 1
return 0
-def subu_make(path_tokens: list[str]) -> int:
- """Handle: CLI.py subu make <masu> <subu> [<subu> ...]
+def _insert_subu_row(conn, owner: str, subu_path: list[str], username: str) -> int | None:
+ """Insert a row into subu table and return its id."""
+ leaf_name = subu_path[-1]
+ full_unix_name = username
+ path_str = " ".join([owner] + subu_path)
+ netns_name = full_unix_name
- path_tokens is:
- [masu, subu, subu, ...]
+ from datetime import datetime, timezone
- Example:
- CLI.py subu make Thomas developer
- CLI.py subu make Thomas developer bolt
- """
+ now = datetime.now(timezone.utc).isoformat()
+
+ try:
+ cur = conn.execute(
+ """INSERT INTO subu
+ (owner, name, full_unix_name, path, netns_name, wg_id, created_at, updated_at)
+ VALUES (?, ?, ?, ?, ?, NULL, ?, ?)""",
+ (owner, leaf_name, full_unix_name, path_str, netns_name, now, now),
+ )
+ conn.commit()
+ return cur.lastrowid
+ except sqlite3.IntegrityError as e:
+ print(
+ f"subu: database already has an entry for '{full_unix_name}': {e}",
+ file=sys.stderr,
+ )
+ return None
+ except Exception as e:
+ print(f"subu: error recording subu in database: {e}", file=sys.stderr)
+ return None
+
+
def _maybe_add_to_incommon(conn, owner: str, new_username: str) -> None:
  """If owner has an incommon subu configured, add new_username to that group."""
  key = f"incommon.{owner}"
  spec = get_option(key, None)
  if not spec:
    return

  # The stored value must look like 'subu_<id>'.
  if not isinstance(spec, str) or not spec.startswith("subu_"):
    print(
      f"subu: warning: option {key} has unexpected value '{spec}', "
      "expected 'subu_<id>'",
      file=sys.stderr,
    )
    return

  try:
    numeric_id = int(spec.split("_", 1)[1])
  except ValueError:
    print(
      f"subu: warning: option {key} has invalid Subu_ID '{spec}'",
      file=sys.stderr,
    )
    return

  match = conn.execute(
    "SELECT full_unix_name FROM subu WHERE id = ? AND owner = ?",
    (numeric_id, owner),
  ).fetchone()
  if match is None:
    print(
      f"subu: warning: option {key} refers to missing subu id {numeric_id}",
      file=sys.stderr,
    )
    return

  ensure_user_in_group(new_username, match["full_unix_name"])
+
+
+def subu_make(path_tokens: list[str]) -> int:
if not path_tokens or len(path_tokens) < 2:
print(
"subu: make requires at least <masu> and one <subu> component",
masu = path_tokens[0]
subu_path = path_tokens[1:]
- # 1) Create Unix user + groups.
try:
username = subu_domain.make_subu(masu, subu_path)
except SystemExit as e:
- # Domain layer uses SystemExit for validation errors.
print(f"subu: {e}", file=sys.stderr)
return 2
except Exception as e:
print(f"subu: error creating Unix user for {path_tokens}: {e}", file=sys.stderr)
return 1
- # 2) Record in SQLite.
conn = _open_existing_db()
if conn is None:
- # Unix side succeeded but DB is missing; report and stop.
return 1
- owner = masu
- leaf_name = subu_path[-1]
- full_unix_name = username
- path_str = " ".join([masu] + subu_path)
- netns_name = full_unix_name # simple deterministic choice for now
+ subu_id = _insert_subu_row(conn, masu, subu_path, username)
+ if subu_id is None:
+ conn.close()
+ return 1
- from datetime import datetime, timezone
+ # If this owner has an incommon subu, join that group.
+ _maybe_add_to_incommon(conn, masu, username)
- now = datetime.now(timezone.utc).isoformat()
+ conn.close()
+ print(f"subu_{subu_id}")
+ return 0
- try:
- cur = conn.execute(
- """INSERT INTO subu
- (owner, name, full_unix_name, path, netns_name, wg_id, created_at, updated_at)
- VALUES (?, ?, ?, ?, ?, NULL, ?, ?)""",
- (owner, leaf_name, full_unix_name, path_str, netns_name, now, now),
+
+def subu_capture(path_tokens: list[str]) -> int:
+ """Handle: subu capture <masu> <subu> [<subu> ...]
+
+ Capture an existing Unix user into the database and fix its groups.
+ """
+ if not path_tokens or len(path_tokens) < 2:
+ print(
+ "subu: capture requires at least <masu> and one <subu> component",
+ file=sys.stderr,
)
- conn.commit()
- subu_id = cur.lastrowid
- except sqlite3.IntegrityError as e:
- print(f"subu: database already has an entry for '{full_unix_name}': {e}", file=sys.stderr)
- conn.close()
+ return 2
+
+ if not _require_root("subu capture"):
return 1
- except Exception as e:
- print(f"subu: error recording subu in database: {e}", file=sys.stderr)
+
+ masu = path_tokens[0]
+ subu_path = path_tokens[1:]
+
+ # Compute expected Unix username.
+ try:
+ username = subu_domain.subu_username(masu, subu_path)
+ except SystemExit as e:
+ print(f"subu: {e}", file=sys.stderr)
+ return 2
+
+ if not user_exists(username):
+ print(f"subu: capture: Unix user '{username}' does not exist", file=sys.stderr)
+ return 1
+
+ # Ensure the primary group exists (legacy systems should already have it).
+ ensure_unix_group(username)
+
+ # Ensure membership in ancestor groups for traversal.
+ ancestor_groups = subu_domain._ancestor_group_names(masu, subu_path)
+ for gname in ancestor_groups:
+ ensure_user_in_group(username, gname)
+
+ conn = _open_existing_db()
+ if conn is None:
+ return 1
+
+ subu_id = _insert_subu_row(conn, masu, subu_path, username)
+ if subu_id is None:
conn.close()
return 1
- conn.close()
+ # Honor any incommon config for this owner.
+ _maybe_add_to_incommon(conn, masu, username)
+ conn.close()
print(f"subu_{subu_id}")
return 0
def _resolve_subu(conn: sqlite3.Connection, target: str, rest: list[str]) -> sqlite3.Row | None:
- """Resolve a subu either by ID (subu_7) or by path.
-
- ID form:
- target = 'subu_7', rest = []
-
- Path form:
- target = masu, rest = [subu, subu, ...]
- """
- # ID form: subu_7
+ """Resolve a subu either by ID (subu_7) or by path."""
if target.startswith("subu_") and not rest:
try:
subu_numeric_id = int(target.split("_", 1)[1])
print(f"subu: no such subu with id {subu_numeric_id}", file=sys.stderr)
return row
- # Path form
path_tokens = [target] + list(rest)
if len(path_tokens) < 2:
print(
def subu_list() -> int:
- """Handle: CLI.py subu list"""
conn = _open_existing_db()
if conn is None:
return 1
cur = conn.execute(
"SELECT id, owner, path, full_unix_name, netns_name, wg_id FROM subu ORDER BY id"
)
-
rows = cur.fetchall()
conn.close()
def subu_info(target: str, rest: list[str]) -> int:
- """Handle: CLI.py subu info <Subu_ID>|<masu> <subu> [<subu> ...]
-
- Examples:
- CLI.py subu info subu_3
- CLI.py subu info Thomas developer bolt
- """
conn = _open_existing_db()
if conn is None:
return 1
def subu_remove(target: str, rest: list[str]) -> int:
- """Handle: CLI.py subu remove <Subu_ID>|<masu> <subu> [<subu> ...]
-
- This removes both:
- - the Unix user/group associated with the subu, and
- - the corresponding row from the database.
- """
if not _require_root("subu remove"):
return 1
return 1
subu_id = row["id"]
- owner = row["owner"]
path_str = row["path"]
path_tokens = path_str.split(" ")
- if not path_tokens or len(path_tokens) < 2:
+ if len(path_tokens) < 2:
print(f"subu: stored path is invalid for id {subu_id}: '{path_str}'", file=sys.stderr)
conn.close()
return 1
masu = path_tokens[0]
subu_path = path_tokens[1:]
- # 1) Remove Unix user + group.
try:
username = subu_domain.remove_subu(masu, subu_path)
except SystemExit as e:
conn.close()
return 1
- # 2) Remove from DB.
try:
conn.execute("DELETE FROM subu WHERE id = ?", (subu_id,))
conn.commit()
return 1
conn.close()
-
print(f"removed subu_{subu_id} {username}")
return 0
-# Placeholder stubs for existing option / WG / network / exec wiring.
-# These keep the module importable while we focus on subu + db.
+def _subu_home_path(owner: str, path_str: str) -> str:
+ """Compute subu home dir from owner and path string."""
+ tokens = path_str.split(" ")
+ if not tokens or tokens[0] != owner:
+ return ""
+ subu_tokens = tokens[1:]
+ path = os.path.join("/home", owner)
+ for t in subu_tokens:
+ path = os.path.join(path, "subu_data", t)
+ return path
+
+
+def _chmod_incommon(home: str) -> None:
+ try:
+ st = os.stat(home)
+ except FileNotFoundError:
+ print(f"subu: warning: incommon home '{home}' does not exist", file=sys.stderr)
+ return
+
+ mode = st.st_mode
+ mode |= (stat.S_IRGRP | stat.S_IXGRP)
+ mode &= ~(stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
+ os.chmod(home, mode)
+
+
+def _chmod_private(home: str) -> None:
+ try:
+ st = os.stat(home)
+ except FileNotFoundError:
+ print(f"subu: warning: home '{home}' does not exist for clear incommon", file=sys.stderr)
+ return
+
+ mode = st.st_mode
+ mode &= ~(stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)
+ os.chmod(home, mode)
+
+
def _owner_subu_unix_names(conn, owner: str) -> list[str]:
  """Return full_unix_name for every subu belonging to owner."""
  cur = conn.execute(
    "SELECT full_unix_name FROM subu WHERE owner = ?",
    (owner,),
  )
  return [r["full_unix_name"] for r in cur.fetchall()]


def subu_option_incommon(action: str, target: str, rest: list[str]) -> int:
  """Handle:

  subu option set incommon <Subu_ID>|<masu> <subu> [<subu> ...]
  subu option clear incommon <Subu_ID>|<masu> <subu> [<subu> ...]

  'set' records the incommon mapping, adds every other subu of the owner
  to the incommon group, and opens the incommon home to the group.
  'clear' refuses if a different incommon is recorded; otherwise it
  clears the mapping, removes the other subu from the group, and closes
  the home.  Returns 0 on success, 1 on error; must run as root.

  (The duplicated sibling-query loop of the original set/clear branches
  is factored into _owner_subu_unix_names.)
  """
  if not _require_root(f"subu option {action} incommon"):
    return 1

  conn = _open_existing_db()
  if conn is None:
    return 1

  row = _resolve_subu(conn, target, rest)
  if row is None:
    conn.close()
    return 1

  subu_id = row["id"]
  owner = row["owner"]
  full_unix_name = row["full_unix_name"]
  path_str = row["path"]

  key = f"incommon.{owner}"
  spec = f"subu_{subu_id}"

  if action == "set":
    # Record mapping.
    set_option(key, spec)

    # Make all other subu of this owner members of this group.
    for uname in _owner_subu_unix_names(conn, owner):
      if uname != full_unix_name:
        ensure_user_in_group(uname, full_unix_name)

    # Adjust directory permissions on incommon home.
    home = _subu_home_path(owner, path_str)
    if home:
      _chmod_incommon(home)

    conn.close()
    print(f"incommon for {owner} set to subu_{subu_id}")
    return 0

  # clear
  current = get_option(key, "")
  if current and current != spec:
    print(
      f"subu: incommon for owner '{owner}' is currently {current}, not {spec}",
      file=sys.stderr,
    )
    conn.close()
    return 1

  # Clear mapping.
  set_option(key, "")

  # Remove other subu from this group.
  for uname in _owner_subu_unix_names(conn, owner):
    if uname != full_unix_name:
      remove_user_from_group(uname, full_unix_name)

  home = _subu_home_path(owner, path_str)
  if home:
    _chmod_private(home)

  conn.close()
  print(f"incommon for {owner} cleared from subu_{subu_id}")
  return 0
+
+
+# --- existing stubs (unchanged) -------------------------------------------
def wg_global(arg1: str | None) -> int:
print("WG global: not yet implemented", file=sys.stderr)
return 1
-def option_unix(mode: str) -> int:
- # example: store a Unix handling mode into options_store
- set_option("Unix.mode", mode)
- print(f"Unix mode set to {mode}")
- return 0
def lo_toggle(subu_id: str, state: str) -> int:
  """Stub for 'lo up|down <Subu_ID>'; always reports unimplemented."""
  print("lo up/down: not yet implemented", file=sys.stderr)
  return 1
def exec(subu_id: str, cmd_argv: list[str]) -> int:
"""
Software / CLI version.
"""
- return "0.3.4"
+ return "0.3.5"
def db_schema_version() -> str:
"""
Ensure 'user' is a member of supplementary group 'group'.
- - Raises if either user or group does not exist.
- - No-op if the membership is already present.
+ No-op if already present.
"""
if not user_exists(user):
raise RuntimeError(f"ensure_user_in_group: user '{user}' does not exist")
if user in g.gr_mem:
return
- # usermod -a -G adds the group, preserving existing ones.
run(["usermod", "-a", "-G", group, user])
+def remove_user_from_group(user: str, group: str):
+    """
+    Ensure 'user' is NOT a member of supplementary group 'group'.
+
+    No-op if user or group is missing, or if user is not a member.
+    """
+    # Tolerant by design: a missing user/group means there is nothing to
+    # remove, so return rather than raise (contrast ensure_user_in_group,
+    # which raises when the user does not exist).
+    if not user_exists(user):
+        return
+    if not group_exists(group):
+        return
+
+    g = grp.getgrnam(group)
+    if user not in g.gr_mem:
+        return
+
+    # gpasswd -d user group is the standard way on Debian/Ubuntu.
+    # We treat failures as non-fatal.
+    run(["gpasswd", "-d", user, group], check =False)
+
+
def remove_unix_user_and_group(name: str):
"""
Remove a Unix user and group that match this name, if they exist.
The user is removed first, then the group.
"""
if user_exists(name):
- # userdel returns non-zero if, for example, the user is logged in.
run(["userdel", name])
if group_exists(name):
run(["groupdel", name])
f"{p} — Subu manager (v{v})\n"
"\n"
"Usage:\n"
+
f" {p} # usage\n"
f" {p} help # detailed help\n"
f" {p} example # example workflow\n"
f" {p} version # print version\n"
"\n"
+
f" {p} db load schema\n"
"\n"
+
f" {p} subu make <masu> <subu> [<subu> ...]\n"
+ f" {p} subu capture <masu> <subu> [<subu> ...]\n"
f" {p} subu list\n"
f" {p} subu info subu_<id>\n"
f" {p} subu info <masu> <subu> [<subu> ...]\n"
f" {p} subu remove subu_<id>\n"
f" {p} subu remove <masu> <subu> [<subu> ...]\n"
+ f" {p} subu option set incommon subu_<id>\n"
+ f" {p} subu option set incommon <masu> <subu> [<subu> ...]\n"
+ f" {p} subu option clear incommon subu_<id>\n"
+ f" {p} subu option clear incommon <masu> <subu> [<subu> ...]\n"
"\n"
+
f" {p} lo up|down <Subu_ID>\n"
"\n"
+
f" {p} WG global <BaseCIDR>\n"
f" {p} WG make <host:port>\n"
f" {p} WG server_provided_public_key <WG_ID> <Base64Key>\n"
f" {p} WG up <WG_ID>\n"
f" {p} WG down <WG_ID>\n"
"\n"
+
f" {p} attach WG <Subu_ID> <WG_ID>\n"
f" {p} detach WG <Subu_ID>\n"
"\n"
+
f" {p} network up|down <Subu_ID>\n"
"\n"
+
f" {p} option set <Subu_ID> <name> <value>\n"
f" {p} option get <Subu_ID> <name>\n"
f" {p} option list <Subu_ID>\n"
"\n"
+
f" {p} exec <Subu_ID> -- <cmd> ...\n"
)
--- /dev/null
+
+
+checkout -> tester branch select
+where -> tester branch show name
+pull -> tester branch pull
+push -> tester branch push
+
+other-release-merge -> developer branch pull
+list-other-release-updates -> developer branch list new
+
+publish
+
+For each command, giving no arguments prints a short description of what it does and a usage message. The 'help' command does the same.
-#+TITLE: Core Branches, Tester Policies, and Workflows
+#+TITLE: Core Branches, Policies, and Workflows
#+AUTHOR: Reasoning Technology
#+OPTIONS: num:t
-Branches and naming
+* Branches and naming
-1.1. Developer branch
+** Developer branch
-=core_developer_branch=
-
-Single canonical development branch.
-
-Developer commits source changes and updated release artifacts here.
-
-1.2. Tester branches
-
-=core_tester_branch=
-
-Main testing branch.
-
-Must correspond to =core_developer_branch=.
-
-Tester does normal day-to-day work here.
-
-=release_tester_<major>[.<minor>]=
-
-Testing branches tied to specific releases.
-
-Each must correspond to an existing =release_<major>[.<minor>]= branch.
-
-1.3. Release branches
-
-=release_<major>[.<minor>]=
-
-Branches representing published releases.
-
-There is never a =release_<major>.0=; that case is named =release_<major>=.
-
-=release_<3>= is treated as version =(3,0)= for ordering.
-
-1.4. Version ordering
-
-Versions are ordered as integer pairs =(major, minor)=.
-
-Minor is treated as =0= when absent.
-
-For two versions:
-
-=(M1, m1) > (M2, m2)= if:
-
-=M1 > M2=, or
-
-=M1 == M2= and =m1 > m2=.
-
-Examples:
-
-=3.10 > 3.2= (minor 10 vs minor 2).
-
-=4.0 > 3.999= (any major 4 > any major 3).
-
-=release_3.2 > release_3= because =(3,2) > (3,0)=.
-
-Directory layout and roles
-
-2.1. Fixed directories
-
-=$REPO_HOME= :: Absolute project root.
-
-Always exists (Harmony skeleton):
-
-=$REPO_HOME/developer=
-
-=$REPO_HOME/tester=
-
-=$REPO_HOME/release=
-
-2.2. Role responsibilities
-
-Developer
-
-Works under =$REPO_HOME/developer= (e.g. after ~. ./env_developer~).
-
-Updates code and runs =release= to populate =$REPO_HOME/release=.
-
-Commits and pushes changes on =core_developer_branch=.
-
-Tester
-
-Works under =$REPO_HOME/tester= (after ~. ./env_tester~).
-
-Writes tests and runs them against artifacts in =$REPO_HOME/release=.
-
-Commits tests on =core_tester_branch= or =release_tester_*=.
-
-Toolsmith
-
-Works on shared code under =tool_shared/= (not the git-ignored =tool_shared/third_party/=).
-
-Commits shared tool changes on =core_developer_branch=.
-
-Developer gets these via ~git pull~ on =core_developer_branch=.
-
-Tester gets them when merging from =core_developer_branch= into tester branches.
-
-2.3. third_party tools
-
-=tool_shared/third_party/= is git-ignored.
-
-Each user is responsible for their own copies there.
-
-Shared, version-controlled tools live elsewhere in the tree (e.g. under =tool_shared/= but not in =third_party/=).
-
-Policies
-
-3.1. Tester write policy
-
-Tester may only write under:
-
-=$REPO_HOME/tester/**=
-
-It is against policy for tester-side pushes to include changes outside =$REPO_HOME/tester/**=.
-
-Current tools enforce this softly:
-
-They list such paths and ask “these are against policy, are you sure?”.
-
-The tester may still proceed, but is expected to do so consciously.
-
-3.2. Developer vs tester domains
-
-Developer owns:
-
-=developer/
-
-=release/
-
-Shared code (e.g. under =tool_shared/=, except git-ignored areas).
-
-Tester owns:
-
-=tester/= and its subtrees.
-
-Toolsmith changes to shared code are made on =core_developer_branch= and flow to testers via merges.
-
-3.3. Executable files
-
-For any merge/pull helper that changes files:
-
-Executable files (based on mode bits) are always listed explicitly before confirmation.
-
-This applies to:
-
-Merges from developer branches.
-
-Pulls/merges from tester’s own remote branches.
-
-3.4. Soft enforcement
-
-For now, tools do not hard-fail on policy violations.
-
-Instead:
-
-They list affected files (especially outside =$REPO_HOME/tester/**=).
-
-They prompt for confirmation:
-
-“These paths are against policy, are you sure you want to proceed?”
-
-Future enforcement via permissions is possible, but not assumed.
-
-Workflows
-
-4.1. Developer workflow (core_developer_branch)
-
-Normal cycle:
-
-Work on code under =developer/=
-
-Build and run tests locally.
-
-Run =release= to update =$REPO_HOME/release= artifacts.
-
-Commit changes (code + release artifacts + any shared tools).
-
-Push to =origin/core_developer_branch=.
-
-The state of =release/= on =core_developer_branch= defines “what the tester will see next time they merge”.
-
-4.2. Tester workflow on core_tester_branch
-
-Enter environment:
-
-~. ./env_tester~
-
-Ensure branch is =core_tester_branch= (via =checkout core=).
-
-Normal cycle:
-
-Run tests against current =$REPO_HOME/release=.
-
-Edit tests under =tester/=
-
-Commit tests as needed.
-
-Optionally push to =origin/core_tester_branch= using the policy-aware =push= command.
-
-Accepting a new core release from developer:
-
-Run a “list updates” / “merge core” helper:
-
-It compares =core_tester_branch= with =core_developer_branch=, restricted to =release/= (and possibly shared tools).
-
-Lists changed files and executable files.
-
-If acceptable:
-
-Merge =origin/core_developer_branch= into =core_tester_branch=.
-
-Commit/resolve as needed.
-
-Push updated =core_tester_branch=.
-
-4.3. Tester workflow on release_tester_<major>[.<minor>]
-
-When a published release needs regression or bug-fix testing:
-
-Identify target release branch =release_<major>[.<minor>]=.
-
-Checkout corresponding tester branch:
-
-=release_tester_<major>[.<minor>]=
-
-Testing cycle:
-
-Run tests against =$REPO_HOME/release= as it exists on this branch.
-
-Update/re-run tests under =tester/= if necessary.
-
-Commit to the =release_tester_* branch.
-
-Optionally push that branch for collaboration.
-
-When hotfixes or updated releases appear:
-
-Corresponding =release_<major>[.<minor>]= is updated on =core_developer_branch= side.
-
-The tester’s “release merge” helper can bring those changes into =release_tester_<major>[.<minor>]= in a controlled way (listing affected paths first).
-
-4.4. Toolsmith workflow
-
-Toolsmith edits shared code (not =third_party/=) on =core_developer_branch=.
-
-Normal steps:
-
-~. ./env_developer~ (or a dedicated toolsmith env that still uses =core_developer_branch=).
-
-Modify shared code (e.g. under =tool_shared/=).
-
-Commit changes on =core_developer_branch=.
-
-Push to =origin/core_developer_branch=.
-
-Developer picks up these changes with their usual pulls.
-
-Tester gets these changes when merging from =core_developer_branch= into tester branches.
-
-Tools can highlight such changes (and executables) before merges so the tester knows when shared infrastructure has shifted.
-
-Tester commands (conceptual)
-
-5.1. General
-
-Commands live under =$REPO_HOME/tester/tool/= and are on =PATH= after ~. ./env_tester~.
-
-Each command:
-
-Has its own CLI parser.
-
-Calls worker functions (in shared modules) to do the real work.
-
-Does not call other commands via their CLIs (no loops).
-
-5.2. Branch introspection
-
-=where=
-
-Prints current branch name.
-
-Returns an error if current branch is not a testing branch:
-
-=core_tester_branch=, or
-
-Any =release_tester_<major>[.<minor>]=.
-
-=checkout core=
-
-Switches to =core_tester_branch=.
-
-This is the “normal mode of work” for new testing.
-
-=checkout release [<major>] [.<minor>]=
-
-Without arguments:
-
-Finds all =release_tester_<major>[.<minor>]= branches.
-
-Parses their versions as integer pairs.
-
-Selects the highest =(major, minor)= (with minor =0 when absent).
-
-With arguments:
-
-Attempts to checkout the specific =release_tester_<major>[.<minor>]= branch.
-
-Errors if that branch does not exist.
-
-5.3. Release updates and merges
-
-=list other release updates=
-
-Compares the current tester branch with the corresponding developer branch:
-
-=core_tester_branch= ↔ =core_developer_branch=
-
-=release_tester_<N[.M]> ↔ release_<N[.M]>=
-
-Restricts comparison to =$REPO_HOME/release/**=.
-
-Lists:
-
-Files that are newer on developer (tester is behind).
-
-Files that are newer on tester (unexpected; highlighted as against policy).
-
-Prints a summary and may ask for confirmation before any merge operation.
-
-=other release merge= (name subject to refinement)
-
-For =core_tester_branch=:
-
-Merges from =core_developer_branch=.
-
-For =release_tester_<N[.M]>=:
-
-Merges from =release_<N[.M]>=.
-
-Before merging, it:
-
-Lists any files under =$REPO_HOME/tester/**= that would change.
-
-Lists all executable files that would change anywhere.
-
-Prompts:
-
-“These paths are against policy (or shared infrastructure changes), are you sure?”
-
-5.4. Push and pull
-
-=push=
-
-Pre-flight:
-
-Diff current branch vs its upstream.
-
-Identify files outside =$REPO_HOME/tester/**=.
-
-If such files exist:
-
-List them, with executables highlighted.
-
-Prompt: “These are against policy, are you sure you want to push?”
-
-If confirmed:
-
-Runs =git push= to the branch’s upstream.
-
-=pull=
-
-Used to pull updates from the same tester branch on the remote (e.g. shared testing).
-
-Behavior:
-
-Lists files that will be changed, especially executables and non-tester paths.
-
-Prompts for confirmation before performing any merge or fast-forward.
-
-Developer changes are not brought in via =pull=; they come through the dedicated “merge from developer” commands.
-
-Publishing and versioning
-
-6.1. Publishing a new release (tester-driven)
-
-When tester deems core testing “done” and the team agrees:
-
-From =core_tester_branch=:
-
-Use =publish major [<major>] [<minor>]= or =publish minor= to create / bump the appropriate =release_<major>[.<minor>]= branch.
-
-Each publish:
-
-Creates a new =release_<major>[.<minor>]= branch if needed.
-
-Ensures that for a given major:
-
-First minor is =1= (no =.0= names).
-
-The corresponding =release_tester_<major>[.<minor>]= branch is used for any additional testing of that specific release.
-
-6.2. Minor and major increments
-
-=publish minor=
-
-For an existing major version:
-
-Examines all existing =release_<major>[.<minor>]= branches.
-
-Treats absent minor as =0= for ordering.
-
-Creates the next minor as =minor_last + 1=.
-
-Names it =release_<major>.<next_minor>=.
-
-=publish major [<major>] [<minor>]=
-
-Creates a new major version branch when a significant release is ready.
-
-If no major specified:
-
-Uses the next integer after the highest existing major.
-
-Starts with either:
-
-=release_<new_major>= (implicit .0), or
-
-=release_<new_major>.1= depending on the chosen policy for that project.
-
-If version specified and does not yet exist:
-
-Creates =release_<major>[.<minor>]= and corresponding tester branch.
-
-Summary
-
-7.1. Core ideas
-
-The project has a clear branch topology:
-
-=core_developer_branch= for development + releases.
-
-=core_tester_branch= for ongoing testing.
-
-=release_<major>[.<minor>]= and =release_tester_<major>[.<minor>]= for published releases and their tests.
-
-The filesystem is partitioned by role:
-
-Developer owns =developer/= and =release/= and shared code.
-
-Tester owns =tester/=.
-
-Shared code is edited on =core_developer_branch= and flows to testers via merges.
-
-Versioning is numeric and explicit:
-
-Major/minor pairs with implicit minor 0 when absent.
-
-No =.0= suffix in branch names; =release_3= is the =3.0= level.
-
-Tools (per-command CLIs) enforce policies softly:
-
-They detect and list out-of-policy changes.
-
-They always highlight executable changes.
-
-They ask for confirmation instead of silently permitting or hard-failing.
-
-Publishing is tester-driven:
-
-Tester (in coordination with the team) decides when a release is ready.
-
-Publishing creates or advances =release_* and =release_tester_* branches so that future testing and regression work can target exact versions.
+- =core_developer_branch=
+ - Single canonical development branch.
+ - Developer-role users commit source changes and updated release artifacts here.
+ - Toolsmith role installs shared tools here.
+
+** Tester branches
+
+- =core_tester_branch=
+ - Main testing branch.
+ - Derived from and merges from =core_developer_branch=.
+ - Tester does normal day-to-day work here.
+
+- =release_tester_<major>[.<minor>]=
+ - Created when a tester checks out a release branch
+ - Merges from an existing =release_<major>[.<minor>]= branch.
+ - For further testing of patched release branches
+
+** Release branches
+
+- =release_<major>[.<minor>]=
+ - Made by a tester publishing a test branch.
+ - Hence, core_developer_branch -merge-> core_tester_branch -publish-> release_branch
+ - There is never a =release_<major>.0=; that case is named =release_<major>=.
+ - =release_3= is treated as version =(3, 0)= for ordering.
+ - Though possible, editing release branches is discouraged due to maintenance issues; instead, upgrade users to new releases.
+
+** Version ordering
+
+- Versions are ordered as integer pairs =(major, minor)=.
+- If the minor part is absent, treat it as =0= for ordering.
+- Comparison:
+ - =(M1, m1) > (M2, m2)= if:
+ - =M1 > M2=, or
+ - =M1 = M2= and =m1 > m2=.
+- Examples:
+ - =3.10 > 3.2= (minor 10 vs 2).
+ - =4.0 > 3.999= (any major 4 > any major 3).
+ - =release_3.2 > release_3= because =(3, 2) > (3, 0)=.
+
+* Directory layout and roles
+
+** Fixed directories
+
+- =$REPO_HOME= :: absolute project root.
+- Always present (Harmony skeleton):
+ - =$REPO_HOME/developer=
+ - =$REPO_HOME/tester=
+ - =$REPO_HOME/release=
+
+** Roles
+
+- Who
+ - One person can take on multiple, or all, of the roles.
+ - Multiple people can take on one or more roles.
+ - Who is taking on which role is currently left to the team to organize.
+   In the future, credentials might be required to log in to roles.
+
+- =env_<role>= file
+ - role entered by sourcing the project top level =env_<role>= file.
+ - Harmony skeleton roles are: developer, toolsmith, and tester.
+ - The developer and tester role have their own working trees including a
+ local document and tool directory for role specific documents and tools.
+
+- Developer
+ - Works under =$REPO_HOME/developer= (e.g. after ~. ./env_developer~).
+ - Updates code and runs =release= to populate =$REPO_HOME/release=.
+ - Commits and pushes changes on =core_developer_branch=.
+
+- Tester
+ - Works under =$REPO_HOME/tester= (after ~. ./env_tester~).
+ - Writes tests and runs them against artifacts in =$REPO_HOME/release=.
+ - Commits tests on =core_tester_branch= or =release_tester_*= branches.
+
+- Toolsmith
+ - Works on shared code under =tool_shared/= (not the git-ignored =tool_shared/third_party/=).
+ - Commits shared tool changes on =core_developer_branch=.
+ - Developer picks these up via ~git pull~ on =core_developer_branch=.
+ - Tester gets them when merging from =core_developer_branch= into tester branches.
+
+** third_party tools
+
+- =tool_shared/third_party/= is git-ignored.
+- Each user is responsible for their own copies there.
+- Install notes are in =tool_shared/document=
+- Shared, version-controlled tools live elsewhere (e.g. under =tool_shared/= but not in =third_party/=).
+
+* Policies
+
+** Tester write policy
+
+- Tester may only write under:
+ - =$REPO_HOME/tester/**=
+- It is against policy for tester-side pushes to include changes outside =$REPO_HOME/tester/**=.
+- Current tools enforce this softly:
+ - They list such paths and ask: “These are against policy, are you sure?”
+ - Tester may still proceed, but is expected to do so consciously.
+ - In future Harmony skeleton releases this might change
+
+** Developer vs tester domains
+
+- Developer domain:
+ - =developer/=
+ - =release/=
+ - Shared code (e.g. under =tool_shared/=, except git-ignored areas).
+ - Developer specific tools =developer/tool=
+ - Developer specific docs =developer/document=
+ - Developer work directories conventionally named after the compiler that will be used to process the contained files. See the directory naming convention document.
+ - For C and derived languages, RT uses a unified source and header approach, see the RT-gcc project for more details.
+ - Developer uses the local `release` tool to promote work product and make it visible to the tester role users.
+- Tester domain:
+ - =tester/= and its subtrees.
+- Toolsmith changes to shared code are made on =core_developer_branch= and flow to testers via merges.
+
+** Executable files
+
+- For any merge/pull helper that changes files:
+ - Executable files (based on mode bits) are always listed explicitly before confirmation.
+ - This applies to:
+ - Merges from developer branches.
+ - Pulls/merges from tester’s own remote branches.
+
+** Soft enforcement
+
+- Tools do not hard-fail on policy violations (for now).
+- Instead, they:
+ - Detect and list out-of-policy changes.
+ - Always highlight executable changes.
+ - Ask for confirmation:
+ - “These paths are against policy, are you sure you want to proceed?”
+- Future enforcement via permissions is possible but not assumed.
+
+* Workflows
+
+** Developer workflow (core_developer_branch)
+
+- Upfront work done by toolsmith
+ 1. Makes the central repository at remote called =github_repo= and/or at =reasoning_repo=. Check the =.git/config= of another project using the same remotes for details.
+ 2. Toolsmith clones the projects, and installs shared bespoke tools, and shared third party tools. He then creates documents and scripts under =tool_shared/document= explaining how to install third party tools. He also edits the `env_<role>` files, and the role =<role>/tool/env= files, role tool directories and role documents.
+ 3. Toolsmith helps developers make clones, making sure all remotes are pushed under the git target `pushall`, and that target is used by local `push` scripts.
+
+- Loop:
+ 1. Develop code under =developer/=.
+ 2. Put developer-created ad hoc local tests into the =developer/experiment= directory. Develop real tests as the tester role. Sometimes tests will be moved from =developer/experiment= to =tester=.
+ 3. Run =release= to update =$REPO_HOME/release= artifacts. A generic release program comes with the Harmony Skeleton. The developer might need to customize the local copy found in =developer/tool=.
+ 4. Commit changes (code + release artifacts + any shared tools). In a future version of the Harmony skeleton there will be a script =developer/tool/push= for this, but currently the developer runs git directly.
+ 5. Push to =origin/core_developer_branch=.
+- The state of =release/= on =core_developer_branch= defines what the tester will see next time they merge.
+
+** Tester workflow on core_tester_branch
+
+- Enter environment:
+ - ~. ./env_tester~
+ - Move to =core_tester_branch= (e.g. via =checkout core=).
+
+- Normal cycle:
+ 1. Run tests against current =$REPO_HOME/release=.
+ 2. Edit tests under =tester/=.
+ 3. Commit tests as needed.
+ 4. Optionally push to =origin/core_tester_branch= using the policy-aware =push= command.
+
+- Accepting a new core release:
+ 1. Use “list updates” / “merge from developer” helper:
+ - Compares =core_tester_branch= with =core_developer_branch=, focusing on =release/= (and possibly shared tools).
+ - Lists changed files and executable files.
+ 2. If acceptable:
+ - Merge =origin/core_developer_branch= into =core_tester_branch=.
+ - Commit / resolve as needed.
+ - Push updated =core_tester_branch=.
+
+** Tester workflow on release_tester_<major>[.<minor>]
+
+- When a published release needs regression or bug-fix testing:
+
+ 1. Identify target =release_<major>[.<minor>]=.
+ 2. Checkout the corresponding tester branch:
+ - =release_tester_<major>[.<minor>]=.
+
+- Testing cycle:
+ 1. Run tests against =$REPO_HOME/release= as it exists on this branch.
+ 2. Update tests under =tester/= if necessary.
+ 3. Commit to the =release_tester_*= branch.
+ 4. Optionally push that branch for collaboration.
+
+- When hotfixes / updated releases appear:
+ - The corresponding =release_<major>[.<minor>]= may be updated on the developer side.
+ - The tester’s “release merge” helper can bring those changes into
+ =release_tester_<major>[.<minor>]= with a list of affected paths.
+
+** Toolsmith workflow
+
+- Toolsmith edits shared code (not =third_party/=) on =core_developer_branch=.
+- Steps:
+ 1. ~. ./env_developer~ (or a dedicated toolsmith env that still uses =core_developer_branch=).
+ 2. Modify shared code (e.g. under =tool_shared/=).
+ 3. Commit changes on =core_developer_branch=.
+ 4. Push to =origin/core_developer_branch=.
+- Developer picks up these changes via normal pulls.
+- Tester gets these changes when merging from =core_developer_branch= into tester branches.
+- Tools should highlight such changes (and executables) before merges so the tester knows when shared infrastructure has shifted.
+
+* Tester commands (conceptual)
+
+** General
+
+- Commands live under =$REPO_HOME/tester/tool/= and are on =PATH= after ~. ./env_tester~.
+- Each command:
+ - Has its own CLI parser.
+ - Calls worker functions in shared modules to do the real work.
+ - Does not call other commands via their CLIs (avoid loops).
+
+** Branch introspection
+
+- =where=
+ - Prints current branch name.
+ - Returns an error if current branch is *not* a testing branch:
+ - =core_tester_branch=, or
+ - Any =release_tester_<major>[.<minor>]=.
+
+- =checkout core=
+ - Switches to =core_tester_branch=.
+ - “Normal mode of work” for new testing.
+
+- =checkout release [<major>] [.<minor>]=
+ - Without arguments:
+ - Finds all =release_tester_<major>[.<minor>]= branches.
+ - Parses their versions as integer pairs.
+ - Selects the highest =(major, minor)= (with minor =0= when absent).
+ - With arguments:
+ - Attempts to checkout the specific =release_tester_<major>[.<minor>]= branch.
+ - Errors if that branch does not exist.
+
+** Release updates and merges
+
+- =list other release updates=
+ - Compares the current tester branch with its corresponding developer branch:
+ - =core_tester_branch= ↔ =core_developer_branch=
+ - =release_tester_<N[.M]> ↔ release_<N[.M]>=
+ - Restricts comparison to =$REPO_HOME/release/**=.
+ - Lists:
+ - Files newer on developer.
+ - Files unexpectedly newer on tester (highlighted as against policy).
+ - Prints a summary and may ask for confirmation before any merge.
+
+- =other release merge= (name TBD)
+ - For =core_tester_branch=:
+ - Merges from =core_developer_branch=.
+ - For =release_tester_<N[.M]>=:
+ - Merges from =release_<N[.M]>=.
+ - Before merging:
+ - Lists files under =$REPO_HOME/tester/**= that would change.
+ - Lists all executable files that would change anywhere.
+ - Prompts:
+ - “These paths are against policy (or shared infrastructure changes), are you sure?”
+
+** Push and pull
+
+- =push=
+ - Pre-flight:
+ - Diff current branch vs its upstream.
+ - Identify files outside =$REPO_HOME/tester/**=.
+ - If such files exist:
+ - List them, with executables highlighted.
+ - Prompt: “These are against policy, are you sure you want to push?”
+ - If confirmed:
+ - Runs =git push= to the branch’s upstream.
+
+- =pull=
+ - Used to pull updates from the *same tester branch* on the remote
+ (e.g. multiple testers collaborating).
+ - Behavior:
+ - Lists files that will be changed, especially executables and non-tester paths.
+ - Prompts for confirmation before performing any merge or fast-forward.
+ - Developer changes are not brought in via =pull=; they come through
+ the explicit “merge from developer” commands.
+
+* Publishing and versioning
+
+** Publishing a new release (tester-driven)
+
+- When tester deems the current state “publishable” and the team agrees:
+
+ - From =core_tester_branch=:
+ - Use =publish major [<major>] [<minor>]= or =publish minor= to create / bump the appropriate
+ =release_<major>[.<minor>]= branch.
+ - Each publish:
+ - Creates a new =release_<major>[.<minor>]= branch if needed.
+ - Ensures for a given major:
+ - The first minor is =1= (no =.0= in names).
+
+- The corresponding =release_tester_<major>[.<minor>]= branch is used for any additional testing of that specific release.
+
+** Minor and major increments
+
+- =publish minor=
+ - For an existing major version:
+ - Examines all existing =release_<major>[.<minor>]= branches.
+ - Treats absent minor as =0= for ordering.
+ - Creates the next minor as =minor_last + 1=.
+ - Names it =release_<major>.<next_minor>=.
+
+- =publish major [<major>] [<minor>]=
+ - Creates a new major version branch when a significant release is ready.
+ - If no major is specified:
+ - Uses the next integer after the highest existing major.
+ - Starts with a project-defined convention:
+ - Either =release_<new_major>= (implicit .0), or
+ - =release_<new_major>.1=.
+ - If a version is specified and does not yet exist:
+ - Creates =release_<major>[.<minor>]= and the corresponding tester branch.
+
+* Summary
+
+- One canonical developer branch: =core_developer_branch=.
+- Tester branches:
+ - =core_tester_branch= for ongoing testing.
+ - =release_tester_<major>[.<minor>]= for specific release testing.
+- Release branches:
+ - =release_<major>[.<minor>]= with numeric version ordering and implicit minor 0.
+- Tester owns =$REPO_HOME/tester/**=; pushes changing other areas are against policy and require explicit confirmation.
+- Shared tools are changed on =core_developer_branch= and flow to testers via controlled merges.
+- Per-command CLIs in =$REPO_HOME/tester/tool/= enforce policies softly, always listing executable changes and asking before doing anything risky.
--- /dev/null
+/home/Thomas/subu_data/developer/subu_data/subu/release/manager/CLI.py
\ No newline at end of file
+++ /dev/null
-/home/Thomas/subu_data/developer/subu_data/subu/release/manager/CLI.py
\ No newline at end of file
--- /dev/null
+#!/usr/bin/env -S python3 -B
+# developer — operations that compare/merge against developer/release branches:
+# developer branch list-new
+# developer branch pull
+
+import sys, subprocess
+from tester_lib import (
+ chdir_repo_root,
+ ensure_on_testing_branch_or_die,
+ get_developer_ref_for_merge,
+ git_diff_name_status,
+ print_changes_with_exec_marker,
+ fetch_remote,
+ changes_outside_tester,
+ prompt_yes_no,
+ RELEASE_ROOT_REL,
+ TESTER_ROOT_REL,
+)
+
+
+def _run_git(args):
+    # Thin wrapper: run `git <args>`; check=True raises CalledProcessError on
+    # a non-zero exit, so callers may assume success when this returns.
+    return subprocess.run(["git"] + list(args), check=True)
+
+
+def usage() -> int:
+    """Print the 'developer' command help text; always returns exit status 0."""
+    print("""developer — tester-side view of developer/release branches
+
+Usage:
+  developer help
+      Show this message.
+
+  developer branch list-new
+      List changes under release/ between the current tester branch (HEAD)
+      and its corresponding developer/release branch.
+
+  developer branch pull
+      Merge from the corresponding developer/release branch into the current
+      tester branch, listing:
+        - changes in release/
+        - any changes under tester/ (policy warning)
+        - any other changed paths and executables
+      and asking for confirmation before the merge.
+""".rstrip())
+    return 0
+
+
+def cmd_branch_list_new() -> int:
+    """
+    List changes under release/ between HEAD (the current tester branch)
+    and its corresponding developer/release branch.
+
+    Returns 0 on success (including "nothing new"); 1 when the current
+    branch has no corresponding developer branch.
+    """
+    repo_root = chdir_repo_root()
+    # Presumably exits when HEAD is not a testing branch (per helper name);
+    # actual behavior is defined in tester_lib.
+    info = ensure_on_testing_branch_or_die()
+    dev_ref = get_developer_ref_for_merge(info.name)
+    if not dev_ref:
+        print(f"developer branch list-new: no corresponding developer branch for '{info.name}'.", file=sys.stderr)
+        return 1
+
+    # Fetch first so the comparison is against the remote's current state.
+    fetch_remote("origin")
+    changes = git_diff_name_status("HEAD", dev_ref, paths=[RELEASE_ROOT_REL])
+
+    if not changes:
+        print(f"release/ is up to date with {dev_ref}.")
+        return 0
+
+    print(f"Changes in release/ between HEAD ({info.name}) and {dev_ref}:")
+    print_changes_with_exec_marker(changes, repo_root)
+    return 0
+
+
+def cmd_branch_pull() -> int:
+    """
+    Merge the corresponding developer/release branch into the current tester
+    branch, after listing planned changes and asking for confirmation.
+
+    Policy handling: any changes under tester/ trigger an extra explicit
+    confirmation; other changed paths outside tester/ and release/ (shared
+    tools, etc.) are listed before the final prompt.
+
+    Returns 0 after a successful merge; 1 on user refusal or when no
+    corresponding developer branch exists.
+    """
+    repo_root = chdir_repo_root()
+    info = ensure_on_testing_branch_or_die()
+    dev_ref = get_developer_ref_for_merge(info.name)
+    if not dev_ref:
+        print(f"developer branch pull: no corresponding developer branch for '{info.name}'.", file=sys.stderr)
+        return 1
+
+    # Fetch so all diffs below reflect the remote's current state.
+    fetch_remote("origin")
+
+    # release/ changes — the normal payload of a developer merge.
+    rel_changes = git_diff_name_status("HEAD", dev_ref, paths=[RELEASE_ROOT_REL])
+    print(f"Planned release/ changes from {info.name} -> {dev_ref}:")
+    print_changes_with_exec_marker(rel_changes, repo_root)
+
+    # tester/ changes (should be rare) — against policy, so ask first.
+    tester_changes = git_diff_name_status("HEAD", dev_ref, paths=[TESTER_ROOT_REL])
+    if tester_changes:
+        print("\nWARNING: these files under tester/ would change when merging from developer:")
+        print_changes_with_exec_marker(tester_changes, repo_root)
+        if not prompt_yes_no("These changes are against the usual policy. Proceed with merge?", default=False):
+            print("Aborting merge.")
+            return 1
+
+    # All other paths (shared tools etc.): listed for awareness, no extra prompt.
+    all_changes = git_diff_name_status("HEAD", dev_ref, paths=None)
+    non_tester = changes_outside_tester(all_changes)
+    other_non_release = [(s, p) for (s, p) in non_tester if not p.startswith(RELEASE_ROOT_REL + "/")]
+    if other_non_release:
+        print("\nOther files outside tester/ that will change (shared tools, etc.):")
+        print_changes_with_exec_marker(other_non_release, repo_root)
+
+    # Final gate: default is "no" so an accidental Enter does not merge.
+    if not prompt_yes_no(f"Proceed with merge from {dev_ref} into {info.name}?", default=False):
+        print("Aborting merge.")
+        return 1
+
+    _run_git(["merge", dev_ref])
+    print(f"Merged {dev_ref} into {info.name}.")
+    return 0
+
+
+def CLI(argv=None) -> int:
+    """
+    Entry point for the 'developer' command.
+
+    argv: argument vector without the program name; defaults to sys.argv[1:].
+    Returns a process exit status (0 for success/help, 1 for errors).
+    """
+    if argv is None:
+        argv = sys.argv[1:]
+
+    # No arguments, or an explicit help flag, prints the usage text.
+    if not argv or argv[0] in ("help", "-h", "--help"):
+        return usage()
+
+    # 'branch' is currently the only top-level command group.
+    if argv[0] != "branch":
+        print(f"developer: unknown top-level command '{argv[0]}' (expected 'branch' or 'help').", file=sys.stderr)
+        return 1
+
+    subargs = argv[1:]
+    if not subargs or subargs[0] in ("help", "-h", "--help"):
+        print("developer branch commands:")
+        print(" developer branch list-new")
+        print(" developer branch pull")
+        return 0
+
+    action = subargs[0]
+
+    if action == "list-new":
+        return cmd_branch_list_new()
+    if action == "pull":
+        return cmd_branch_pull()
+
+    print(f"developer branch: unknown action '{action}'.", file=sys.stderr)
+    return 1
+
+
if __name__ == "__main__":
    # SystemExit propagates CLI's integer return value as the process exit status.
    raise SystemExit(CLI())
--- /dev/null
+#!/usr/bin/env -S python3 -B
+# publish — create / bump release_<major>[.<minor>] branches
+#
+# Usage:
+# publish help
+# publish minor
+# publish major [<major>] [<minor>]
+
+import sys, subprocess
+from typing import List, Tuple
+from tester_lib import (
+ chdir_repo_root,
+ ensure_on_testing_branch_or_die,
+ RELEASE_PREFIX,
+ RELEASE_TEST_PREFIX, # kept if you later want to auto-create tester branches
+ get_release_branches,
+ choose_latest_release,
+)
+
+
def _run_git(args):
    """Run a git subcommand; raises CalledProcessError on a nonzero exit."""
    cmd = ["git", *args]
    return subprocess.run(cmd, check=True)
+
+
def usage() -> int:
    """Print the publish tool's help text and return exit status 0."""
    # NOTE: the triple-quoted text is user-facing output; keep its wording in
    # sync with publish_minor()/publish_major() behavior.
    print("""publish — create or bump release_<major>[.<minor>] branches

Usage:
 publish help
 Show this message.

 publish minor
 Create a new minor release branch for the highest existing major.
 - Finds all release_<major>[.<minor>] branches.
 - Picks the highest major M.
 - Computes next minor as (max_minor_for_M + 1) (minor=0 when absent).
 - Creates release_<M>.<next_minor> from the current tester branch.
 - Checks out the new release_<...> branch.

 publish major [<major>] [<minor>]
 Create a new major release branch.
 - If no major is given:
 use next integer after the highest existing major (or 0 if none).
 - If no minor is given:
 start at minor 1 → release_<major>.1
 - If minor==0 is explicitly given:
 create release_<major> (no .0 in the name).
 - Branch is created from the current tester branch.
 - Checks out the new release_<...> branch.
""".rstrip())
    return 0
+
+
def _list_release_branches() -> List[Tuple[str, int, int]]:
    """Return (name, major, minor) for each release_<major>[.<minor>] branch.

    Branches without a parsable major are skipped; a missing minor becomes 0.
    """
    return [
        (info.name, info.major, info.minor or 0)
        for info in get_release_branches()
        if info.major is not None
    ]
+
+
def _next_minor_for_major(major: int) -> int:
    """Return the next free minor number for *major*.

    Yields 1 when the major has no branches yet (the first minor is 1).
    """
    minors = [minor for _name, maj, minor in _list_release_branches() if maj == major]
    if not minors:
        return 1  # first minor is 1
    return max(minors) + 1
+
+
def publish_minor() -> int:
    """Create and check out the next minor release branch of the highest major.

    The branch is created from the current tester branch (HEAD).  Requires at
    least one existing release_<major> branch.  Returns a process exit status.
    """
    info = ensure_on_testing_branch_or_die()
    branches = _list_release_branches()
    if not branches:
        print("publish minor: no existing release_<major> branches. Use 'publish major' first.", file=sys.stderr)
        return 1

    # Sort by (major, minor) so the last element carries the highest major.
    branches_sorted = sorted(branches, key=lambda t: (t[1], t[2]))
    highest_major = branches_sorted[-1][1]
    next_minor = _next_minor_for_major(highest_major)

    rel_name = f"{RELEASE_PREFIX}{highest_major}.{next_minor}"

    # 'git branch' with one argument creates the branch from HEAD.
    print(f"Creating new minor release branch {rel_name} from {info.name}...")
    _run_git(["branch", rel_name])
    print(f"Checking out {rel_name}...")
    _run_git(["checkout", rel_name])
    return 0
+
+
def publish_major(major_arg: str = None, minor_arg: str = None) -> int:
    """Create (if needed) and check out a new major release branch.

    major_arg/minor_arg are the optional CLI argument strings.  When
    major_arg is omitted, the next integer after the highest existing major
    is used (0 when there are no release branches).  A missing minor
    defaults to 1; an explicit minor of 0 produces release_<major> with no
    ".0" suffix.  The branch is created from the current tester branch.

    Returns a process exit status (0 on success, 1 on error).
    """
    info = ensure_on_testing_branch_or_die()
    branches = _list_release_branches()

    if major_arg is None:
        if branches:
            highest_major = max(maj for (_n, maj, _m) in branches)
            new_major = highest_major + 1
        else:
            new_major = 0
    else:
        # Validate user input instead of letting int() raise a traceback.
        try:
            new_major = int(major_arg)
        except ValueError:
            print(f"publish major: invalid major '{major_arg}' (expected an integer).", file=sys.stderr)
            return 1

    if minor_arg is None:
        new_minor = 1
    else:
        try:
            new_minor = int(minor_arg)
        except ValueError:
            print(f"publish major: invalid minor '{minor_arg}' (expected an integer).", file=sys.stderr)
            return 1

    # minor==0 means "no .0 suffix" by naming convention.
    if new_minor == 0:
        rel_name = f"{RELEASE_PREFIX}{new_major}"
    else:
        rel_name = f"{RELEASE_PREFIX}{new_major}.{new_minor}"

    # Creating an already-existing branch would make git error out; check first.
    cp = subprocess.run(["git", "branch", "--list", rel_name], text=True, stdout=subprocess.PIPE, check=True)
    if cp.stdout.strip():
        print(f"publish major: branch {rel_name} already exists.", file=sys.stderr)
    else:
        print(f"Creating new major release branch {rel_name} from {info.name}...")
        _run_git(["branch", rel_name])

    print(f"Checking out {rel_name}...")
    _run_git(["checkout", rel_name])
    return 0
+
+
def CLI(argv=None) -> int:
    """Entry point for 'publish'; returns a process exit status."""
    chdir_repo_root()
    if argv is None:
        argv = sys.argv[1:]

    if not argv or argv[0] in ("help", "-h", "--help"):
        return usage()

    sub, rest = argv[0], argv[1:]

    if sub == "minor":
        # 'minor' is argument-free.
        if rest:
            print("publish minor: unexpected extra arguments.", file=sys.stderr)
            return 1
        return publish_minor()

    if sub == "major":
        if len(rest) > 2:
            print("publish major: too many arguments.", file=sys.stderr)
            return 1
        major = rest[0] if len(rest) >= 1 else None
        minor = rest[1] if len(rest) >= 2 else None
        return publish_major(major, minor)

    print(f"publish: unknown subcommand '{sub}' (expected 'help', 'minor', or 'major').", file=sys.stderr)
    return 1
+
+
if __name__ == "__main__":
    # SystemExit propagates CLI's integer return value as the process exit status.
    raise SystemExit(CLI())
--- /dev/null
+#!/usr/bin/env -S python3 -B
+# tester — tester branch utilities:
+# tester branch select core
+# tester branch select release [<major>[.<minor>]]
+# tester branch show-name
+# tester branch pull
+# tester branch push
+
+import sys, subprocess
+from tester_lib import (
+ chdir_repo_root,
+ TEST_CORE_BRANCH,
+ get_current_branch,
+ is_testing_branch,
+ get_upstream_ref_for_current_branch,
+ git_diff_name_status,
+ print_changes_with_exec_marker,
+ changes_outside_tester,
+ prompt_yes_no,
+ get_release_branches,
+ choose_latest_release,
+ RELEASE_PREFIX,
+ RELEASE_TEST_PREFIX,
+)
+
+
def _run_git(args):
    """Execute 'git <args>'; raises CalledProcessError on a nonzero exit."""
    return subprocess.run(["git", *list(args)], check=True)
+
+
def usage() -> int:
    """Print the tester tool's help text and return exit status 0."""
    # NOTE: user-facing output; keep wording in sync with the cmd_* handlers.
    print("""tester — tester-side branch commands

Usage:
 tester help
 Show this message.

 tester branch show-name
 Print current branch and error if not a tester branch.

 tester branch select core
 Checkout core_tester_branch.

 tester branch select release [<major>[.<minor>]]
 Select a release_tester_<major>[.<minor>] branch.
 - If no version is given: pick the highest existing release_<major>[.<minor>] by version.
 - If the corresponding release_tester_* branch does not exist yet:
 it is created from release_<major>[.<minor>] and checked out.

 tester branch pull
 Pull from the current branch's upstream (same tester branch on remote),
 listing incoming changes and executables and asking for confirmation.

 tester branch push
 Push the current tester branch to its upstream, listing outgoing changes.
 If any changes are outside $REPO_HOME/tester/, a policy warning and
 confirmation prompt is shown.
""".rstrip())
    return 0
+
+
+# ----- branch show-name -----
+
def cmd_branch_show_name() -> int:
    """Print the current branch name; exit 1 when it is not a tester branch."""
    chdir_repo_root()
    current = get_current_branch()
    print(current)
    if is_testing_branch(current):
        return 0
    print(f"tester: '{current}' is not a tester branch (core_tester_branch or release_tester_*).", file=sys.stderr)
    return 1
+
+
+# ----- branch select -----
+
+def _parse_version_arg(version: str):
+ if "." in version:
+ major_s, minor_s = version.split(".", 1)
+ else:
+ major_s, minor_s = version, None
+ return major_s, minor_s
+
+
def _select_release_specific(major_s: str, minor_s: str) -> int:
    """Check out release_tester_<major>[.<minor>], creating it when needed.

    minor_s may be None, in which case the un-suffixed branch names are used.
    The tester branch is created from the matching release_<...> branch if it
    does not exist yet.  Returns a process exit status.
    """
    # construct release_<...> name
    if minor_s is None:
        rel_name = f"{RELEASE_PREFIX}{major_s}"
        tester_name = f"{RELEASE_TEST_PREFIX}{major_s}"
    else:
        rel_name = f"{RELEASE_PREFIX}{major_s}.{minor_s}"
        tester_name = f"{RELEASE_TEST_PREFIX}{major_s}.{minor_s}"

    # ensure base release branch exists
    cp = subprocess.run(
        ["git", "branch", "--list", rel_name],
        text=True, stdout=subprocess.PIPE, check=True
    )
    # empty output from 'git branch --list' means no branch matched
    if not cp.stdout.strip():
        print(f"tester: release branch '{rel_name}' does not exist.", file=sys.stderr)
        return 1

    # if release_tester branch does not exist, create it from release_<...>
    cp2 = subprocess.run(
        ["git", "branch", "--list", tester_name],
        text=True, stdout=subprocess.PIPE, check=True
    )
    if not cp2.stdout.strip():
        print(f"Creating tester branch {tester_name} from {rel_name}...")
        _run_git(["branch", tester_name, rel_name])

    # checkout tester branch
    _run_git(["checkout", tester_name])
    return 0
+
+
def cmd_branch_select(args: list[str]) -> int:
    """Handle 'tester branch select core|release [...]'.

    'core' checks out the core tester branch; 'release' selects (creating if
    needed) a release_tester_* branch, defaulting to the newest release when
    no version argument is given.  Returns a process exit status.
    """
    chdir_repo_root()
    if not args:
        print("tester branch select: missing sub-argument (core|release).", file=sys.stderr)
        return 1
    sub = args[0]
    if sub == "core":
        _run_git(["checkout", TEST_CORE_BRANCH])
        return 0
    if sub == "release":
        if len(args) == 1:
            # pick latest release_<...> by version
            rel_branches = get_release_branches()
            info = choose_latest_release(rel_branches)
            if info is None:
                print("tester: no release_<major>[.<minor>] branches found.", file=sys.stderr)
                return 1
            major = info.major
            minor = info.minor
            major_s = str(major)
            # minor 0 maps to the un-suffixed release_<major> branch name
            minor_s = None if minor is None or minor == 0 else str(minor)
            return _select_release_specific(major_s, minor_s)
        else:
            major_s, minor_s = _parse_version_arg(args[1])
            # 'or None' normalizes an empty minor string (e.g. '3.') to None
            return _select_release_specific(major_s, minor_s or None)

    print(f"tester branch select: unknown mode '{sub}' (expected 'core' or 'release').", file=sys.stderr)
    return 1
+
+
+# ----- branch pull -----
+
def cmd_branch_pull() -> int:
    """Pull the current tester branch from its upstream, with a preview.

    Lists incoming HEAD..upstream changes (executables marked) and asks for
    confirmation before running 'git pull'.  Returns a process exit status.
    """
    repo_root = chdir_repo_root()
    branch = get_current_branch()
    upstream = get_upstream_ref_for_current_branch()
    if not upstream:
        print(f"tester pull: no upstream configured for branch '{branch}'.", file=sys.stderr)
        return 1

    # HEAD..upstream
    changes = git_diff_name_status("HEAD", upstream, paths=None)
    if not changes:
        print(f"{branch} is up to date with {upstream}.")
        return 0

    print(f"Incoming changes from {upstream} into {branch}:")
    print_changes_with_exec_marker(changes, repo_root)

    if not prompt_yes_no("Apply these changes with git pull?", default=False):
        print("Aborting pull.")
        return 1

    _run_git(["pull"])
    return 0
+
+
+# ----- branch push -----
+
def cmd_branch_push() -> int:
    """Push the current tester branch to its upstream, with a preview.

    Lists outgoing upstream..HEAD changes; any change outside the tester/
    tree triggers a policy warning plus an extra confirmation prompt.
    Returns a process exit status.
    """
    repo_root = chdir_repo_root()
    branch = get_current_branch()
    upstream = get_upstream_ref_for_current_branch()
    if not upstream:
        print(f"tester push: no upstream configured for branch '{branch}'.", file=sys.stderr)
        print("You may want to set it with: git branch --set-upstream-to origin/<branch>", file=sys.stderr)
        return 1

    # upstream..HEAD
    changes = git_diff_name_status(upstream, "HEAD", paths=None)
    if not changes:
        print(f"Nothing to push (HEAD is same as {upstream}).")
        return 0

    print(f"Changes to be pushed from {branch} to {upstream}:")
    print_changes_with_exec_marker(changes, repo_root)

    # Policy: tester pushes should stay under tester/; anything else gets an
    # explicit extra confirmation.
    outside = changes_outside_tester(changes)
    if outside:
        print("\nWARNING: the following paths are outside $REPO_HOME/tester/ and are against policy:")
        print_changes_with_exec_marker(outside, repo_root)
        if not prompt_yes_no("These are against policy. Are you sure you want to push?", default=False):
            print("Aborting push.")
            return 1

    if not prompt_yes_no(f"Proceed with git push to {upstream}?", default=True):
        print("Aborting push.")
        return 1

    _run_git(["push"])
    return 0
+
+
def CLI(argv=None) -> int:
    """Entry point for the 'tester' tool; returns a process exit status."""
    if argv is None:
        argv = sys.argv[1:]

    if not argv or argv[0] in ("help", "-h", "--help"):
        return usage()

    if argv[0] != "branch":
        print(f"tester: unknown top-level command '{argv[0]}' (expected 'branch' or 'help').", file=sys.stderr)
        return 1

    subargs = argv[1:]
    if not subargs or subargs[0] in ("help", "-h", "--help"):
        print("tester branch commands:")
        print(" tester branch show-name")
        print(" tester branch select core")
        print(" tester branch select release [<major>[.<minor>]]")
        print(" tester branch pull")
        print(" tester branch push")
        return 0

    action, rest = subargs[0], subargs[1:]

    # 'select' is the only action that takes further arguments.
    if action == "select":
        return cmd_branch_select(rest)

    no_arg_handlers = {
        "show-name": cmd_branch_show_name,
        "pull": cmd_branch_pull,
        "push": cmd_branch_push,
    }
    handler = no_arg_handlers.get(action)
    if handler is not None:
        return handler()

    print(f"tester branch: unknown action '{action}'.", file=sys.stderr)
    return 1
+
+
if __name__ == "__main__":
    # SystemExit propagates CLI's integer return value as the process exit status.
    raise SystemExit(CLI())
--- /dev/null
+#!/usr/bin/env -S python3 -B
+# tester_lib.py — shared helpers for tester tools
+
+import os, sys, subprocess
+from dataclasses import dataclass
+from typing import List, Tuple, Optional
+
+
# Branch-name constants shared by the developer/tester/publish tools.
DEV_BRANCH = "core_developer_branch"     # the shared developer branch
TEST_CORE_BRANCH = "core_tester_branch"  # tester counterpart of DEV_BRANCH
RELEASE_PREFIX = "release_"              # release_<major>[.<minor>]
RELEASE_TEST_PREFIX = "release_tester_"  # tester counterpart of a release branch

# Repo-relative top-level directories used when filtering git diffs.
TESTER_ROOT_REL = "tester"
DEVELOPER_ROOT_REL = "developer"
RELEASE_ROOT_REL = "release"
+
+
class BranchKind:
    """String tags classifying a git branch name (see classify_branch)."""
    CORE_DEV = "core_developer"
    CORE_TEST = "core_tester"
    RELEASE = "release"
    RELEASE_TEST = "release_tester"
    OTHER = "other"
+
+
@dataclass
class BranchInfo:
    """Parsed view of a git branch name."""
    name: str                    # full branch name as reported by git
    kind: str                    # one of the BranchKind tags
    major: Optional[int] = None  # release major, for release*/release_tester* kinds
    minor: Optional[int] = None  # release minor (0 when absent from the name)
+
+
def _run_git(args, capture_output=True, check=True) -> subprocess.CompletedProcess:
    """Run 'git <args>' as text; capture stdout/stderr unless capture_output is False."""
    cmd = ["git", *args]
    if not capture_output:
        # Stream git's output straight to the terminal.
        return subprocess.run(cmd, text=True, check=check)
    return subprocess.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=check)
+
+
def get_repo_root() -> str:
    """Return the repository root: $REPO_HOME when set, else git's toplevel.

    Exits the process with status 1 when neither source yields a root.
    """
    rh = os.environ.get("REPO_HOME")
    if rh:
        # An explicit REPO_HOME overrides git discovery.
        return os.path.abspath(rh)
    try:
        cp = _run_git(["rev-parse", "--show-toplevel"])
        return cp.stdout.strip()
    except Exception as e:
        print(f"tester_lib: cannot determine repo root: {e}", file=sys.stderr)
        sys.exit(1)
+
+
def chdir_repo_root() -> str:
    """Change the working directory to the repository root and return it."""
    os.chdir(root := get_repo_root())
    return root
+
+
def get_current_branch() -> str:
    """Return the name of the currently checked-out branch."""
    return _run_git(["rev-parse", "--abbrev-ref", "HEAD"]).stdout.strip()
+
+
+def _parse_version_from_name(name: str, prefix: str) -> Optional[Tuple[int, int]]:
+ """
+ Parse branch names like:
+ prefix + "<major>" or prefix + "<major>.<minor>"
+
+ Returns (major, minor) with minor defaulting to 0 if absent.
+ """
+ if not name.startswith(prefix):
+ return None
+ tail = name[len(prefix):]
+ if not tail:
+ return None
+ if "." in tail:
+ major_s, minor_s = tail.split(".", 1)
+ else:
+ major_s, minor_s = tail, "0"
+ try:
+ major = int(major_s)
+ minor = int(minor_s)
+ except ValueError:
+ return None
+ return (major, minor)
+
+
def classify_branch(name: str) -> BranchInfo:
    """Map a branch name to a BranchInfo (with version fields for release branches)."""
    fixed = {
        DEV_BRANCH: BranchKind.CORE_DEV,
        TEST_CORE_BRANCH: BranchKind.CORE_TEST,
    }
    if name in fixed:
        return BranchInfo(name=name, kind=fixed[name])

    # Try release_ first, then release_tester_; a release_tester_* name fails
    # the first parse because its tail ('tester_...') is not numeric.
    for prefix, kind in ((RELEASE_PREFIX, BranchKind.RELEASE),
                         (RELEASE_TEST_PREFIX, BranchKind.RELEASE_TEST)):
        parsed = _parse_version_from_name(name, prefix)
        if parsed is not None:
            return BranchInfo(name=name, kind=kind, major=parsed[0], minor=parsed[1])

    return BranchInfo(name=name, kind=BranchKind.OTHER)
+
+
def is_testing_branch(name: str) -> bool:
    """True for core_tester_branch and release_tester_* branch names."""
    kind = classify_branch(name).kind
    return kind in {BranchKind.CORE_TEST, BranchKind.RELEASE_TEST}
+
+
def get_release_tester_branches() -> List[BranchInfo]:
    """List local release_tester_* branches that carry a parsable version."""
    cp = _run_git(["branch", "--list", f"{RELEASE_TEST_PREFIX}*"])
    # 'git branch' prefixes the checked-out branch with '* '; strip it.
    stripped = (line.strip() for line in cp.stdout.splitlines())
    names = (n[2:] if n.startswith("* ") else n for n in stripped if n)
    return [
        info for info in map(classify_branch, names)
        if info.kind == BranchKind.RELEASE_TEST and info.major is not None
    ]
+
+
def get_release_branches() -> List[BranchInfo]:
    """List local release_<major>[.<minor>] branches that carry a parsable version."""
    cp = _run_git(["branch", "--list", f"{RELEASE_PREFIX}*"])
    # 'git branch' prefixes the checked-out branch with '* '; strip it.
    stripped = (line.strip() for line in cp.stdout.splitlines())
    names = (n[2:] if n.startswith("* ") else n for n in stripped if n)
    return [
        info for info in map(classify_branch, names)
        if info.kind == BranchKind.RELEASE and info.major is not None
    ]
+
+
def choose_latest_release(branches: List[BranchInfo]) -> Optional[BranchInfo]:
    """Return the highest-version branch, or None for an empty list.

    Missing major/minor sort as -1; ties on (major, minor) resolve to the
    later list element (stable sort, last element wins).
    """
    def version_key(b: BranchInfo) -> Tuple[int, int]:
        return (-1 if b.major is None else b.major,
                -1 if b.minor is None else b.minor)

    ordered = sorted(branches, key=version_key)
    return ordered[-1] if ordered else None
+
+
def choose_latest_release_tester(branches: List[BranchInfo]) -> Optional[BranchInfo]:
    """Alias of choose_latest_release; the version ordering is identical for
    release_tester_* branches."""
    return choose_latest_release(branches)
+
+
def corresponding_developer_branch(test_branch: str) -> Optional[str]:
    """Developer-side branch name matching a tester branch (None otherwise)."""
    info = classify_branch(test_branch)
    if info.kind == BranchKind.CORE_TEST:
        return DEV_BRANCH
    if info.kind != BranchKind.RELEASE_TEST or info.major is None:
        return None
    # A minor of 0 (or None) maps to the un-suffixed release_<major> name.
    if info.minor:
        return f"{RELEASE_PREFIX}{info.major}.{info.minor}"
    return f"{RELEASE_PREFIX}{info.major}"
+
+
def get_developer_ref_for_merge(test_branch: str) -> Optional[str]:
    """origin-qualified ref of the developer branch to merge from, or None."""
    dev_branch = corresponding_developer_branch(test_branch)
    return f"origin/{dev_branch}" if dev_branch else None
+
+
def get_upstream_ref_for_current_branch() -> Optional[str]:
    """Return the current branch's upstream ref, or None when none is configured."""
    try:
        cp = _run_git(["rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"])
        return cp.stdout.strip()
    except subprocess.CalledProcessError:
        # git exits nonzero when no upstream is configured.
        return None
+
+
def git_diff_name_status(from_ref: str, to_ref: str, paths: Optional[List[str]] = None) -> List[Tuple[str, str]]:
    """Return (status, path) pairs from 'git diff --name-status from..to'.

    When paths is given, the diff is limited to those pathspecs.
    """
    args = ["diff", "--name-status", f"{from_ref}..{to_ref}"]
    if paths:
        args += ["--", *paths]
    cp = _run_git(args)
    pairs: List[Tuple[str, str]] = []
    for line in cp.stdout.splitlines():
        status, tab, path = line.partition("\t")
        if not tab:
            # blank or malformed line (no tab separator) — skip it
            continue
        pairs.append((status.strip(), path.strip()))
    return pairs
+
+
def list_executable_flags(paths: List[str], repo_root: str) -> List[str]:
    """Return the subset of repo-relative paths that exist and are executable."""
    def _is_exec(rel: str) -> bool:
        full = os.path.join(repo_root, rel)
        return os.path.exists(full) and os.access(full, os.X_OK)

    return [rel for rel in paths if _is_exec(rel)]
+
+
def print_changes_with_exec_marker(changes: List[Tuple[str, str]], repo_root: str) -> None:
    """Print one ' STATUS<TAB>path' line per change, tagging executables with [EXEC]."""
    if not changes:
        print(" (no changes)")
        return
    exec_set = set(list_executable_flags([path for _status, path in changes], repo_root))
    for status, path in changes:
        suffix = " [EXEC]" if path in exec_set else ""
        print(f" {status}\t{path}{suffix}")
+
+
def prompt_yes_no(msg: str, default: bool = False) -> bool:
    """Interactively ask msg; returns True for y/yes, False for n/no.

    An empty answer returns *default*; anything else re-prompts.
    """
    # The suffix capitalizes the default choice, shell-style.
    suffix = "[y/N]" if not default else "[Y/n]"
    while True:
        ans = input(f"{msg} {suffix} ").strip().lower()
        if not ans:
            return default
        if ans in ("y", "yes"):
            return True
        if ans in ("n", "no"):
            return False
        print("Please answer y or n.")
+
+
def is_under_tester_tree(rel_path: str) -> bool:
    """True when a repo-relative path (as emitted by git) is under tester/.

    git always reports forward-slash-separated paths, so split on "/" rather
    than os.sep — using os.sep would mis-classify every path on Windows.
    (str.split always yields at least one element, so no emptiness check is
    needed.)
    """
    first = rel_path.split("/", 1)[0]
    return first == TESTER_ROOT_REL
+
+
def changes_outside_tester(changes: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
    """Filter to the changes whose path is NOT under the tester/ tree."""
    outside: List[Tuple[str, str]] = []
    for status, path in changes:
        if not is_under_tester_tree(path):
            outside.append((status, path))
    return outside
+
+
def fetch_remote(remote: str = "origin") -> None:
    """Run 'git fetch <remote>'.

    capture_output=False streams git's own progress output to the terminal.
    """
    _run_git(["fetch", remote], capture_output=False, check=True)
+
+
def ensure_on_testing_branch_or_die() -> BranchInfo:
    """Return BranchInfo for the current branch; exit(1) unless it is a tester branch.

    Accepted kinds are core_tester and release_tester_*.
    """
    name = get_current_branch()
    info = classify_branch(name)
    if info.kind not in (BranchKind.CORE_TEST, BranchKind.RELEASE_TEST):
        print(f"Error: current branch '{name}' is not a testing branch (core_tester or release_tester_*).", file=sys.stderr)
        sys.exit(1)
    return info