+++ /dev/null
-#!/bin/bash
-# launch_subu.sh — Start a subuser shell (console or GUI-aware, with systemd user session)
-
-set -euo pipefail
-umask 0077
-
-subu="$1"
-if [ -z "$subu" ]; then
- echo "❌ No subuser name supplied"
- exit 1
-fi
-
-subu_user="Thomas-$subu"
-if ! id "$subu_user" &>/dev/null; then
- echo "❌ User $subu_user does not exist"
- exit 1
-fi
-
-# Check required commands
-error_flag=0
-for cmd in machinectl xauth xhost dbus-run-session; do
- if ! command -v "$cmd" &>/dev/null; then
- echo "❌ $cmd not found"
- error_flag=1
- fi
-done
-if [ "$error_flag" -eq 1 ]; then
- exit 1
-fi
-
-# don't use sudo -v, because it will echo the password into the emacs shell
-sudo echo >& /dev/null
-
-
-# Something broke when I turned this off. What was it. Will have to turn it off again and
-# test.
-#
-# Enable lingering so user services can persist
-sudo loginctl enable-linger "$subu_user"
-
-# Decide how to set the use_xauth and use_xhost flags.
-#
-# As of the time of this writing, on my machines, Wayland insists on
-# xauth, while my X11 is refuses to use it, thus it needs xhost control.
-# So this is how I determine how to set the flags here.
-#
-
-# bash will evaluate this variables inside a quoted if even when the
-# gate is falase, so everything needs to be initialized, whether used
-# or not.
-subu_Xauthority_path=""
-use_xauth=0
-use_xhost=0
-if [[ -n "${WAYLAND_DISPLAY:-}" ]]; then
- has_display=true
- XDG_SESSION_TYPE="wayland"
- subu_Xauthority_path="$HOME/subu/$subu/.Xauthority"
- use_xauth=1
- use_xhost=0
- echo "🌀 Wayland session - Using xauth for access control"
-
-elif [[ -n "${DISPLAY:-}" ]]; then
- has_display=true
- XDG_SESSION_TYPE="x11"
- use_xauth=0
- use_xhost=1
- echo "🧱 X11 session - Using xhost for access control"
-
-else
- has_display=false
- XDG_SESSION_TYPE="tty"
- use_xauth=0
- use_xhost=0
- echo "🖳 Console session (no X detected)"
-fi
-
-if [[ "$use_xhost" -eq 1 ]]; then
- xhost +SI:localuser:"$subu_user"
-fi
-if [[ "$use_xauth" -eq 1 ]]; then
- mkdir -p "$(dirname "$subu_Xauthority_path")"
- touch "$subu_Xauthority_path"
- xauth extract "$subu_Xauthority_path" "$DISPLAY"
-fi
-
-if $has_display; then
-
-
- sudo machinectl shell "$subu_user"@ /bin/bash -c "
-
- # --- session env from parent ---
- export DISPLAY=\"${DISPLAY:-${WAYLAND_DISPLAY}}\";
- export XDG_RUNTIME_DIR='/run/user/$(id -u "$subu_user")';
- export XDG_SESSION_TYPE=\"$XDG_SESSION_TYPE\";
- export XDG_SESSION_CLASS=\"user\";
- export XDG_DATA_DIRS=\"/usr/share/gnome:/usr/local/share/:/usr/share/\";
- export USE_XAUTH=$use_xauth
-
- # Only set XAUTHORITY when we actually prepared it (Wayland/xauth case)
- if [[ \"\$USE_XAUTH\" -eq 1 ]]; then
- export XAUTHORITY=\"$subu_Xauthority_path\"
- fi
-
- if command -v /usr/bin/gnome-keyring-daemon &>/dev/null; then
- eval \$(/usr/bin/gnome-keyring-daemon --start)
- export GNOME_KEYRING_CONTROL GNOME_KEYRING_PID
- fi
-
- # WirePlumber: ignore logind (subuser isn't the active seat)
- systemctl --user set-environment WIREPLUMBER_DISABLE_PLUGINS=logind
- systemctl --user import-environment DISPLAY XAUTHORITY WAYLAND_DISPLAY XDG_RUNTIME_DIR XDG_SESSION_TYPE
-
- # Bring up audio (sockets first, then services)
- systemctl --user enable --now pipewire.socket pipewire-pulse.socket >/dev/null 2>&1 || true
- systemctl --user restart wireplumber pipewire pipewire-pulse
-
- exec dbus-run-session -- bash -l
- "
-
-else
-
- # Console mode with DBus session (give it audio too)
- sudo machinectl shell "$subu_user"@ /bin/bash -c "
- export XDG_RUNTIME_DIR='/run/user/$(id -u "$subu_user")}';
-
- systemctl --user set-environment WIREPLUMBER_DISABLE_PLUGINS=logind
- systemctl --user import-environment XDG_RUNTIME_DIR
- systemctl --user enable --now pipewire.socket pipewire-pulse.socket >/dev/null 2>&1 || true
- systemctl --user restart wireplumber pipewire pipewire-pulse
-
- exec dbus-run-session -- bash -l
- "
-fi
-
-
--- /dev/null
+#!/bin/bash
+# launch_subu.sh — Start a subuser shell (console or GUI-aware, with systemd user session)
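+# Usage: launch_subu.sh <subu>     (e.g. 'S0', which maps to Unix user Thomas_S0)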
+
+set -euo pipefail
+umask 0077
+
+subu="${1:-}"
+if [ -z "$subu" ]; then
+ echo "❌ No subuser name supplied"
+ exit 1
+fi
+
+subu_user="Thomas_$subu"
+if ! id "$subu_user" &>/dev/null; then
+ echo "❌ User $subu_user does not exist"
+ exit 1
+fi
+
+# Check required commands
+error_flag=0
+for cmd in machinectl xauth xhost dbus-run-session; do
+ if ! command -v "$cmd" &>/dev/null; then
+ echo "❌ $cmd not found"
+ error_flag=1
+ fi
+done
+if [ "$error_flag" -eq 1 ]; then
+ exit 1
+fi
+
+# don't use sudo -v, because it will echo the password into the emacs shell
+sudo echo >& /dev/null
+
+
+# Something broke when I turned this off, but I no longer recall what.  It will
+# have to be turned off again and retested.
+#
+# Enable lingering so user services can persist
+sudo loginctl enable-linger "$subu_user"
+
+# Decide how to set the use_xauth and use_xhost flags.
+#
+# As of this writing, on my machines, Wayland insists on xauth, while my
+# X11 refuses to use it and therefore needs xhost control. That is how the
+# flags below are determined.
+#
+
+# bash will evaluate these variables inside a quoted if even when the
+# gate is false, so everything needs to be initialized, whether used
+# or not.
+subu_Xauthority_path=""
+use_xauth=0
+use_xhost=0
+if [[ -n "${WAYLAND_DISPLAY:-}" ]]; then
+ has_display=true
+ XDG_SESSION_TYPE="wayland"
+ subu_Xauthority_path="$HOME/subu/$subu/.Xauthority"
+ use_xauth=1
+ use_xhost=0
+ echo "🌀 Wayland session - Using xauth for access control"
+
+elif [[ -n "${DISPLAY:-}" ]]; then
+ has_display=true
+ XDG_SESSION_TYPE="x11"
+ use_xauth=0
+ use_xhost=1
+ echo "🧱 X11 session - Using xhost for access control"
+
+else
+ has_display=false
+ XDG_SESSION_TYPE="tty"
+ use_xauth=0
+ use_xhost=0
+ echo "🖳 Console session (no X detected)"
+fi
+
+if [[ "$use_xhost" -eq 1 ]]; then
+ xhost +SI:localuser:"$subu_user"
+fi
+if [[ "$use_xauth" -eq 1 ]]; then
+ mkdir -p "$(dirname "$subu_Xauthority_path")"
+ touch "$subu_Xauthority_path"
+ xauth extract "$subu_Xauthority_path" "$DISPLAY"
+fi
+
+if $has_display; then
+
+
+ sudo machinectl shell "$subu_user"@ /bin/bash -c "
+
+ # --- session env from parent ---
+ export DISPLAY=\"${DISPLAY:-${WAYLAND_DISPLAY}}\";
+ export XDG_RUNTIME_DIR='/run/user/$(id -u "$subu_user")';
+ export XDG_SESSION_TYPE=\"$XDG_SESSION_TYPE\";
+ export XDG_SESSION_CLASS=\"user\";
+ export XDG_DATA_DIRS=\"/usr/share/gnome:/usr/local/share/:/usr/share/\";
+ export USE_XAUTH=$use_xauth
+
+ # Only set XAUTHORITY when we actually prepared it (Wayland/xauth case)
+ if [[ \"\$USE_XAUTH\" -eq 1 ]]; then
+ export XAUTHORITY=\"$subu_Xauthority_path\"
+ fi
+
+ if command -v /usr/bin/gnome-keyring-daemon &>/dev/null; then
+ eval \$(/usr/bin/gnome-keyring-daemon --start)
+ export GNOME_KEYRING_CONTROL GNOME_KEYRING_PID
+ fi
+
+ # WirePlumber: ignore logind (subuser isn't the active seat)
+ systemctl --user set-environment WIREPLUMBER_DISABLE_PLUGINS=logind
+ systemctl --user import-environment DISPLAY XAUTHORITY WAYLAND_DISPLAY XDG_RUNTIME_DIR XDG_SESSION_TYPE
+
+ # Bring up audio (sockets first, then services)
+ systemctl --user enable --now pipewire.socket pipewire-pulse.socket >/dev/null 2>&1 || true
+ systemctl --user restart wireplumber pipewire pipewire-pulse
+
+ exec dbus-run-session -- bash -l
+ "
+
+else
+
+ # Console mode with DBus session (give it audio too)
+ sudo machinectl shell "$subu_user"@ /bin/bash -c "
+    export XDG_RUNTIME_DIR='/run/user/$(id -u "$subu_user")';
+
+ systemctl --user set-environment WIREPLUMBER_DISABLE_PLUGINS=logind
+ systemctl --user import-environment XDG_RUNTIME_DIR
+ systemctl --user enable --now pipewire.socket pipewire-pulse.socket >/dev/null 2>&1 || true
+ systemctl --user restart wireplumber pipewire pipewire-pulse
+
+ exec dbus-run-session -- bash -l
+ "
+fi
+
+
+++ /dev/null
-#!/usr/bin/env python3
-# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-"""CLI.py — subu manager front-end.
-
-Role: parse argv, choose command, call dispatch.
-
-CLI should not do any work beyond:
-
- * figure out program_name (for example, manager/CLI.py or wrapper name)
- * call the right function in dispatch
- * print text from text.py when needed
- * exit with the returned status code
-"""
-
-import os, sys, argparse
-from text import make_text
-import dispatch
-
-
-def register_device_commands(subparsers):
- """
- Register device-related commands:
-
- device scan [--base-dir DIR]
-
- For v1, we only support scanning already-mounted devices under /mnt.
- """
- ap = subparsers.add_parser("device")
- ap.add_argument(
- "action",
- choices =["scan"],
- )
- ap.add_argument(
- "--base-dir",
- default ="/mnt",
- help ="root under which to scan for <mapname>/user_data (default: /mnt)",
- )
-
-
-def register_db_commands(subparsers):
- """Register DB-related commands under 'db'.
-
- db load schema
- """
- ap_db = subparsers.add_parser("db")
- db_sub = ap_db.add_subparsers(dest="db_verb")
-
- ap = db_sub.add_parser("load")
- ap.add_argument("what", choices=["schema"])
-
-
-def register_subu_commands(subparsers):
- """Register subu related commands under 'subu':
-
- subu make <masu> <subu> [<subu>]*
- subu capture <masu> <subu> [<subu>]*
- subu remove <Subu_ID> | <masu> <subu> [<subu>]*
- subu list
- subu info <Subu_ID> | <masu> <subu> [<subu>]*
- subu option set|clear incommon <Subu_ID> | <masu> <subu> [<subu>]*
- """
- ap_subu = subparsers.add_parser("subu")
- subu_sub = ap_subu.add_subparsers(dest="subu_verb")
-
- # make: path[0] is masu, remaining elements are the subu chain
- ap = subu_sub.add_parser("make")
- ap.add_argument("path", nargs="+")
-
- # capture: path[0] is masu, remaining elements are the subu chain
- ap = subu_sub.add_parser("capture")
- ap.add_argument("path", nargs="+")
-
- # remove: either ID or path
- ap = subu_sub.add_parser("remove")
- ap.add_argument("target")
- ap.add_argument("rest", nargs="*")
-
- # list
- subu_sub.add_parser("list")
-
- # info
- ap = subu_sub.add_parser("info")
- ap.add_argument("target")
- ap.add_argument("rest", nargs="*")
-
- # option incommon
- ap = subu_sub.add_parser("option")
- ap.add_argument("opt_action", choices=["set", "clear"])
- ap.add_argument("opt_name", choices=["incommon"])
- ap.add_argument("target")
- ap.add_argument("rest", nargs="*")
-
-
-def register_wireguard_commands(subparsers):
- """Register WireGuard related commands, grouped under 'WG'."""
- ap = subparsers.add_parser("WG")
- ap.add_argument(
- "wg_verb",
- choices=[
- "global",
- "make",
- "server_provided_public_key",
- "info",
- "information",
- "up",
- "down",
- ],
- )
- ap.add_argument("arg1", nargs="?")
- ap.add_argument("arg2", nargs="?")
-
-
-def register_attach_commands(subparsers):
- """Register attach and detach commands:
-
- attach WG <Subu_ID> <WG_ID>
- detach WG <Subu_ID>
- """
- ap = subparsers.add_parser("attach")
- ap.add_argument("what", choices=["WG"])
- ap.add_argument("subu_id")
- ap.add_argument("wg_id")
-
- ap = subparsers.add_parser("detach")
- ap.add_argument("what", choices=["WG"])
- ap.add_argument("subu_id")
-
-
-def register_network_commands(subparsers):
- """Register network aggregate commands:
-
- network up|down <Subu_ID>
- """
- ap = subparsers.add_parser("network")
- ap.add_argument("state", choices=["up", "down"])
- ap.add_argument("subu_id")
-
-
-def register_option_commands(subparsers):
- """Register global option commands (non-subu-specific for now):
-
- option set|get|list ...
- """
- ap = subparsers.add_parser("option")
- ap.add_argument("action", choices=["set", "get", "list"])
- ap.add_argument("subu_id")
- ap.add_argument("name", nargs="?")
- ap.add_argument("value", nargs="?")
-
-
-def register_exec_commands(subparsers):
- """Register exec command:
-
- exec <Subu_ID> -- <cmd> ...
- """
- ap = subparsers.add_parser("exec")
- ap.add_argument("subu_id")
- ap.add_argument("--", dest="cmd", nargs=argparse.REMAINDER, default=[])
-
-
-def register_lo_commands(subparsers):
- """Register lo command:
-
- lo up|down <Subu_ID>
- """
- ap = subparsers.add_parser("lo")
- ap.add_argument("state", choices=["up", "down"])
- ap.add_argument("subu_id")
-
-
-def build_arg_parser(program_name: str) -> argparse.ArgumentParser:
- """Build the top level argument parser for the subu manager."""
- parser = argparse.ArgumentParser(prog=program_name, add_help=False)
- parser.add_argument("-V", "--Version", action="store_true", help="print version")
-
- subparsers = parser.add_subparsers(dest="verb")
-
- register_db_commands(subparsers)
- register_subu_commands(subparsers)
- register_wireguard_commands(subparsers)
- register_attach_commands(subparsers)
- register_network_commands(subparsers)
- register_option_commands(subparsers)
- register_exec_commands(subparsers)
- register_device_commands(subparsers)
- register_lo_commands(subparsers)
-
- return parser
-
-
-def _collect_parse_errors(ns, program_name: str) -> list[str]:
- """Check for semantic argument problems and collect error strings."""
- errors: list[str] = []
-
- if ns.verb == "device":
- if ns.action == "scan":
- return dispatch.device_scan(ns.base_dir)
-
- if ns.verb == "subu":
- sv = getattr(ns, "subu_verb", None)
- if sv in ("make", "capture"):
- if not ns.path or len(ns.path) < 2:
- errors.append(
- f"subu {sv} requires at least <masu> and one <subu> component"
- )
- elif sv in ("remove", "info"):
- if ns.target.startswith("subu_"):
- if ns.rest:
- errors.append(
- f"{program_name} subu {sv} with an ID form must not have extra path tokens"
- )
- else:
- if len([ns.target] + list(ns.rest)) < 2:
- errors.append(
- f"{program_name} subu {sv} <masu> <subu> [<subu> ...] requires at least two tokens"
- )
- elif sv == "option":
- # For incommon, same ID vs path rules as info/remove.
- if ns.opt_name == "incommon":
- if ns.target.startswith("subu_"):
- if ns.rest:
- errors.append(
- f"{program_name} subu option {ns.opt_action} incommon with an ID form "
- "must not have extra path tokens"
- )
- else:
- if len([ns.target] + list(ns.rest)) < 2:
- errors.append(
- f"{program_name} subu option {ns.opt_action} incommon "
- "<masu> <subu> [<subu> ...] requires at least two tokens"
- )
-
- return errors
-
-
-def CLI(argv=None) -> int:
- """Top level entry point for the subu manager CLI."""
- if argv is None:
- argv = sys.argv[1:]
-
- prog_override = os.environ.get("SUBU_PROGNAME")
- if prog_override:
- program_name = prog_override
- else:
- raw0 = sys.argv[0] or "subu"
- program_name = os.path.basename(raw0) or "subu"
-
- text = make_text(program_name)
-
- # No arguments is the same as "usage".
- if not argv:
- print(text.usage(), end="")
- return 0
-
- simple = {
- "help": text.help,
- "--help": text.help,
- "-h": text.help,
- "usage": text.usage,
- "example": text.example,
- "version": text.version,
- }
- if argv[0] in simple:
- print(simple[argv[0]](), end="")
- return 0
-
- parser = build_arg_parser(program_name)
- ns = parser.parse_args(argv)
-
- if getattr(ns, "Version", False):
- print(text.version(), end="")
- return 0
-
- errors = _collect_parse_errors(ns, program_name)
- if errors:
- for msg in errors:
- print(f"error: {msg}", file=sys.stderr)
- return 2
-
- try:
- if ns.verb == "db":
- if ns.db_verb == "load" and ns.what == "schema":
- return dispatch.db_load_schema()
-
- if ns.verb == "subu":
- sv = ns.subu_verb
- if sv == "make":
- return dispatch.subu_make(ns.path)
- if sv == "capture":
- return dispatch.subu_capture(ns.path)
- if sv == "list":
- return dispatch.subu_list()
- if sv == "info":
- return dispatch.subu_info(ns.target, ns.rest)
- if sv == "remove":
- return dispatch.subu_remove(ns.target, ns.rest)
- if sv == "option":
- # For now only 'incommon' is supported.
- return dispatch.subu_option_incommon(ns.opt_action, ns.target, ns.rest)
-
- if ns.verb == "lo":
- return dispatch.lo_toggle(ns.subu_id, ns.state)
-
- if ns.verb == "WG":
- v = ns.wg_verb
- if v in ("info", "information") and ns.arg1 is None:
- print("WG info requires WG_ID", file=sys.stderr)
- return 2
- if v == "global":
- return dispatch.wg_global(ns.arg1)
- if v == "make":
- return dispatch.wg_make(ns.arg1)
- if v == "server_provided_public_key":
- return dispatch.wg_server_public_key(ns.arg1, ns.arg2)
- if v in ("info", "information"):
- return dispatch.wg_info(ns.arg1)
- if v == "up":
- return dispatch.wg_up(ns.arg1)
- if v == "down":
- return dispatch.wg_down(ns.arg1)
-
- if ns.verb == "attach":
- if ns.what == "WG":
- return dispatch.attach_wg(ns.subu_id, ns.wg_id)
-
- if ns.verb == "detach":
- if ns.what == "WG":
- return dispatch.detach_wg(ns.subu_id)
-
- if ns.verb == "network":
- return dispatch.network_toggle(ns.subu_id, ns.state)
-
- if ns.verb == "option":
- # global options still placeholder
- print("option: not yet implemented", file=sys.stderr)
- return 1
-
- if ns.verb == "exec":
- if not ns.cmd:
- print(f"{program_name} exec <Subu_ID> -- <cmd> ...", file=sys.stderr)
- return 2
- return dispatch.exec(ns.subu_id, ns.cmd)
-
- print(text.usage(), end="")
- return 2
-
- except Exception as e:
- print(f"error: {e}", file=sys.stderr)
- return 1
-
-
-if __name__ == "__main__":
- sys.exit(CLI())
--- /dev/null
+#!/usr/bin/env python3
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+"""CLI.py — subu manager front-end.
+
+Role: parse argv, choose command, call dispatch.
+
+CLI should not do any work beyond:
+
+ * figure out program_name (for example, manager/CLI.py or wrapper name)
+ * call the right function in dispatch
+ * print text from text.py when needed
+ * exit with the returned status code
+"""
+
+import os, sys, argparse
+from text import make_text
+import dispatch
+
+
+def register_device_commands(subparsers):
+ """
+ Register device-related commands:
+
+ device scan [--base-dir DIR]
+
+ For v1, we only support scanning already-mounted devices under /mnt.
+ """
+ ap = subparsers.add_parser("device")
+ ap.add_argument("action", choices=["scan","attach","detach"])
+ ap.add_argument("mapname", nargs="?")
+ ap.add_argument("--base-dir", default="/mnt")
+
+
+def register_db_commands(subparsers):
+ """Register DB-related commands under 'db'.
+
+ db load schema
+ """
+ ap_db = subparsers.add_parser("db")
+ db_sub = ap_db.add_subparsers(dest="db_verb")
+
+ ap = db_sub.add_parser("load")
+ ap.add_argument("what", choices=["schema"])
+
+
+def register_subu_commands(subparsers):
+ """Register subu related commands under 'subu':
+
+ subu make <masu> <subu> [<subu>]*
+ subu capture <masu> <subu> [<subu>]*
+ subu remove <Subu_ID> | <masu> <subu> [<subu>]*
+ subu list
+ subu info <Subu_ID> | <masu> <subu> [<subu>]*
+ subu option set|clear incommon <Subu_ID> | <masu> <subu> [<subu>]*
+ """
+ ap_subu = subparsers.add_parser("subu")
+ subu_sub = ap_subu.add_subparsers(dest="subu_verb")
+
+ # make: path[0] is masu, remaining elements are the subu chain
+ ap = subu_sub.add_parser("make")
+ ap.add_argument("path", nargs="+")
+
+ # capture: path[0] is masu, remaining elements are the subu chain
+ ap = subu_sub.add_parser("capture")
+ ap.add_argument("path", nargs="+")
+
+ # remove: either ID or path
+ ap = subu_sub.add_parser("remove")
+ ap.add_argument("target")
+ ap.add_argument("rest", nargs="*")
+
+ # list
+ subu_sub.add_parser("list")
+
+ # info
+ ap = subu_sub.add_parser("info")
+ ap.add_argument("target")
+ ap.add_argument("rest", nargs="*")
+
+ # option incommon
+ ap = subu_sub.add_parser("option")
+ ap.add_argument("opt_action", choices=["set", "clear"])
+ ap.add_argument("opt_name", choices=["incommon"])
+ ap.add_argument("target")
+ ap.add_argument("rest", nargs="*")
+
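+# NOTE: not wired into build_arg_parser; register_subu_commands above owns the
+# 'subu' subparser, so registering both would clash over the same command name.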
+def register_subu_option_commands(subparsers):
+ ap = subparsers.add_parser("subu")
+ ap.add_argument("subverb", choices=["make","remove","list","info","capture","option"])
+ ap.add_argument("args", nargs=argparse.REMAINDER)
+
+
+def register_wireguard_commands(subparsers):
+ """Register WireGuard related commands, grouped under 'WG'."""
+ ap = subparsers.add_parser("WG")
+ ap.add_argument(
+ "wg_verb",
+ choices=[
+ "global",
+ "make",
+ "server_provided_public_key",
+ "info",
+ "information",
+ "up",
+ "down",
+ ],
+ )
+ ap.add_argument("arg1", nargs="?")
+ ap.add_argument("arg2", nargs="?")
+
+
+def register_attach_commands(subparsers):
+ """Register attach and detach commands:
+
+ attach WG <Subu_ID> <WG_ID>
+ detach WG <Subu_ID>
+ """
+ ap = subparsers.add_parser("attach")
+ ap.add_argument("what", choices=["WG"])
+ ap.add_argument("subu_id")
+ ap.add_argument("wg_id")
+
+ ap = subparsers.add_parser("detach")
+ ap.add_argument("what", choices=["WG"])
+ ap.add_argument("subu_id")
+
+
+def register_network_commands(subparsers):
+ """Register network aggregate commands:
+
+ network up|down <Subu_ID>
+ """
+ ap = subparsers.add_parser("network")
+ ap.add_argument("state", choices=["up", "down"])
+ ap.add_argument("subu_id")
+
+
+
+def register_option_commands(subparsers):
+ """Register global option commands (non-subu-specific for now):
+
+ option set|get|list ...
+ """
+ ap = subparsers.add_parser("option")
+ ap.add_argument("action", choices=["set", "get", "list"])
+ ap.add_argument("subu_id")
+ ap.add_argument("name", nargs="?")
+ ap.add_argument("value", nargs="?")
+
+
+def register_exec_commands(subparsers):
+ """Register exec command:
+
+ exec <Subu_ID> -- <cmd> ...
+ """
+ ap = subparsers.add_parser("exec")
+ ap.add_argument("subu_id")
+ ap.add_argument("--", dest="cmd", nargs=argparse.REMAINDER, default=[])
+
+
+def register_lo_commands(subparsers):
+ """Register lo command:
+
+ lo up|down <Subu_ID>
+ """
+ ap = subparsers.add_parser("lo")
+ ap.add_argument("state", choices=["up", "down"])
+ ap.add_argument("subu_id")
+
+
+def build_arg_parser(program_name: str) -> argparse.ArgumentParser:
+ """Build the top level argument parser for the subu manager."""
+ parser = argparse.ArgumentParser(prog=program_name, add_help=False)
+ parser.add_argument("-V", "--Version", action="store_true", help="print version")
+
+ subparsers = parser.add_subparsers(dest="verb")
+
+ register_db_commands(subparsers)
+ register_subu_commands(subparsers)
+ register_wireguard_commands(subparsers)
+ register_attach_commands(subparsers)
+ register_network_commands(subparsers)
+ register_option_commands(subparsers)
+ register_exec_commands(subparsers)
+ register_device_commands(subparsers)
+ register_lo_commands(subparsers)
+
+ return parser
+
+
+def _collect_parse_errors(ns, program_name: str) -> list[str]:
+ """Check for semantic argument problems and collect error strings."""
+ errors: list[str] = []
+
+ if ns.verb == "device":
+ if ns.action == "scan":
+ return dispatch.device_scan(ns.base_dir)
+
+ if ns.verb == "subu":
+ sv = getattr(ns, "subu_verb", None)
+ if sv in ("make", "capture"):
+ if not ns.path or len(ns.path) < 2:
+ errors.append(
+ f"subu {sv} requires at least <masu> and one <subu> component"
+ )
+ elif sv in ("remove", "info"):
+ if ns.target.startswith("subu_"):
+ if ns.rest:
+ errors.append(
+ f"{program_name} subu {sv} with an ID form must not have extra path tokens"
+ )
+ else:
+ if len([ns.target] + list(ns.rest)) < 2:
+ errors.append(
+ f"{program_name} subu {sv} <masu> <subu> [<subu> ...] requires at least two tokens"
+ )
+ elif sv == "option":
+ # For incommon, same ID vs path rules as info/remove.
+ if ns.opt_name == "incommon":
+ if ns.target.startswith("subu_"):
+ if ns.rest:
+ errors.append(
+ f"{program_name} subu option {ns.opt_action} incommon with an ID form "
+ "must not have extra path tokens"
+ )
+ else:
+ if len([ns.target] + list(ns.rest)) < 2:
+ errors.append(
+ f"{program_name} subu option {ns.opt_action} incommon "
+ "<masu> <subu> [<subu> ...] requires at least two tokens"
+ )
+
+ return errors
+
+
+def CLI(argv=None) -> int:
+ """Top level entry point for the subu manager CLI."""
+ if argv is None:
+ argv = sys.argv[1:]
+
+ prog_override = os.environ.get("SUBU_PROGNAME")
+ if prog_override:
+ program_name = prog_override
+ else:
+ raw0 = sys.argv[0] or "subu"
+ program_name = os.path.basename(raw0) or "subu"
+
+ text = make_text(program_name)
+
+ # No arguments is the same as "usage".
+ if not argv:
+ print(text.usage(), end="")
+ return 0
+
+ simple = {
+ "help": text.help,
+ "--help": text.help,
+ "-h": text.help,
+ "usage": text.usage,
+ "example": text.example,
+ "version": text.version,
+ }
+ if argv[0] in simple:
+ print(simple[argv[0]](), end="")
+ return 0
+
+ parser = build_arg_parser(program_name)
+ ns = parser.parse_args(argv)
+
+ if getattr(ns, "Version", False):
+ print(text.version(), end="")
+ return 0
+
+ errors = _collect_parse_errors(ns, program_name)
+ if errors:
+ for msg in errors:
+ print(f"error: {msg}", file=sys.stderr)
+ return 2
+
+ try:
+ if ns.verb == "db":
+ if ns.db_verb == "load" and ns.what == "schema":
+ return dispatch.db_load_schema()
+
+ if ns.verb == "device":
+ if ns.action == "scan": return dispatch.device_scan(ns.base_dir)
+ if ns.action == "attach": return dispatch.device_attach(ns.mapname)
+ if ns.action == "detach": return dispatch.device_detach(ns.mapname)
+
+ if ns.verb == "subu":
+ if ns.subverb == "capture":
+ # args: <masu> <subu> [.<subu>]*
+ return dispatch.subu_capture(ns.args)
+ if ns.subverb == "option":
+ # expected: set|clear incommon <masu> <subu> [.<subu>]*
+ if len(ns.args) < 3: ...
+ action, which, *rest = ns.args
+ owner, *parts = rest
+ if action == "set" and which == "incommon":
+ return dispatch.subu_option_incommon_set(owner, parts)
+ if action == "clear" and which == "incommon":
+ return dispatch.subu_option_incommon_clear(owner, parts)
+ sv = ns.subu_verb
+ if sv == "make":
+ return dispatch.subu_make(ns.path)
+ if sv == "list":
+ return dispatch.subu_list()
+ if sv == "info":
+ return dispatch.subu_info(ns.target, ns.rest)
+ if sv == "remove":
+ return dispatch.subu_remove(ns.target, ns.rest)
+ if sv == "option":
+ # For now only 'incommon' is supported.
+ return dispatch.subu_option_incommon(ns.opt_action, ns.target, ns.rest)
+
+ if ns.verb == "lo":
+ return dispatch.lo_toggle(ns.subu_id, ns.state)
+
+ if ns.verb == "WG":
+ v = ns.wg_verb
+ if v in ("info", "information") and ns.arg1 is None:
+ print("WG info requires WG_ID", file=sys.stderr)
+ return 2
+ if v == "global":
+ return dispatch.wg_global(ns.arg1)
+ if v == "make":
+ return dispatch.wg_make(ns.arg1)
+ if v == "server_provided_public_key":
+ return dispatch.wg_server_public_key(ns.arg1, ns.arg2)
+ if v in ("info", "information"):
+ return dispatch.wg_info(ns.arg1)
+ if v == "up":
+ return dispatch.wg_up(ns.arg1)
+ if v == "down":
+ return dispatch.wg_down(ns.arg1)
+
+ if ns.verb == "attach":
+ if ns.what == "WG":
+ return dispatch.attach_wg(ns.subu_id, ns.wg_id)
+
+ if ns.verb == "detach":
+ if ns.what == "WG":
+ return dispatch.detach_wg(ns.subu_id)
+
+ if ns.verb == "network":
+ return dispatch.network_toggle(ns.subu_id, ns.state)
+
+ if ns.verb == "option":
+ # global options still placeholder
+ print("option: not yet implemented", file=sys.stderr)
+ return 1
+
+ if ns.verb == "exec":
+ if not ns.cmd:
+ print(f"{program_name} exec <Subu_ID> -- <cmd> ...", file=sys.stderr)
+ return 2
+ return dispatch.exec(ns.subu_id, ns.cmd)
+
+ print(text.usage(), end="")
+ return 2
+
+ except Exception as e:
+ print(f"error: {e}", file=sys.stderr)
+ return 1
+
+
+if __name__ == "__main__":
+ sys.exit(CLI())
--- /dev/null
+# dispatch.py (additions)
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+
+import os, sys, sqlite3, subprocess
+import env
+from infrastructure.db import open_db
+from domain.subu import ensure_chain, find_by_path, subu_username
+from domain import device as device_domain
+
+def device_scan(base_dir: str ="/mnt") -> int:
+ try:
+ conn = open_db()
+ except Exception as e:
+ print(f"subu: cannot open database at '{env.db_path()}': {e}", file =sys.stderr)
+ return 1
+ try:
+ n = device_domain.scan_and_reconcile(conn, base_dir)
+ print(f"scanned {n} device(s) under {base_dir}")
+ return 0
+ finally:
+ conn.close()
+
+def subu_capture(path: list[str], device_mapname: str|None =None) -> int:
+ """
+ path: ['masu','s0','s1', ...]
+ device_mapname: optional mapname to associate (must already be visible under /mnt)
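+
+  Example: subu_capture(["Thomas", "S0"]) ensures the DB row chain for
+  Thomas S0 exists and prints its full Unix name, "Thomas_S0".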
+ """
+ if not path or len(path) < 2:
+ print("subu: capture requires <masu> <subu> [.<subu>]*", file =sys.stderr)
+ return 2
+ owner, parts = path[0], path[1:]
+ try:
+ conn = open_db()
+ except Exception as e:
+ print(f"subu: cannot open database at '{env.db_path()}': {e}", file =sys.stderr)
+ return 1
+ try:
+ device_id = None
+ if device_mapname:
+ conn.row_factory = sqlite3.Row
+ row = conn.execute("SELECT id FROM device WHERE mapname=?", (device_mapname,)).fetchone()
+ if not row:
+ print(f"subu: device '{device_mapname}' not known; run 'device scan' first", file =sys.stderr)
+ return 2
+ device_id = int(row["id"])
+ leaf = ensure_chain(conn, owner, parts, device_id, True)
+ conn.commit()
+ print(leaf["full_unix_name"])
+ return 0
+ finally:
+ conn.close()
+
+def subu_list() -> int:
+ """
+ Print a flat list: id owner full_path full_unix_name device online
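+
+  Example line (tab separated, hypothetical id):
+    3  Thomas  Thomas S0  Thomas_S0  local  1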
+ """
+ try:
+ conn = open_db()
+ except Exception as e:
+ print(f"subu: cannot open database at '{env.db_path()}': {e}", file =sys.stderr)
+ return 1
+ try:
+ conn.row_factory = sqlite3.Row
+ rows = conn.execute(
+ """SELECT n.id, n.owner, n.full_path, n.full_unix_name, n.is_online, d.mapname AS device
+ FROM subu_node n
+ LEFT JOIN device d ON d.id = n.device_id
+ ORDER BY n.owner, n.full_path"""
+ ).fetchall()
+ if not rows:
+ print("(no subu in database)")
+ return 0
+ for r in rows:
+ dev = r["device"] or "local"
+ on = "1" if int(r["is_online"] or 0) else "0"
+ print(f'{r["id"]}\t{r["owner"]}\t{r["full_path"]}\t{r["full_unix_name"]}\t{dev}\t{on}')
+ return 0
+ finally:
+ conn.close()
+
+def subu_option_incommon_set(spec_owner: str, spec_parts: list[str]) -> int:
+ """
+ Make a subu 'incommon': grant g+rx on its home dir and add all sibling subu users
+ under the same owner into its group. Unix work is delegated to infrastructure.unix.
+ """
+ from infrastructure.unix import incommon_set_for_subu # keep import local
+ try:
+ conn = open_db()
+ except Exception as e:
+ print(f"subu: cannot open database: {e}", file =sys.stderr)
+ return 1
+ try:
+ # Ensure the node exists in DB (don’t change device)
+ leaf = find_by_path(conn, spec_owner, spec_parts)
+ if not leaf:
+ print("subu: specified subu not found in DB; capture or make it first", file =sys.stderr)
+ return 2
+ incommon_set_for_subu(spec_owner, spec_parts)
+ return 0
+ finally:
+ conn.close()
+
+def subu_option_incommon_clear(spec_owner: str, spec_parts: list[str]) -> int:
+ """
+ Reverse of set: remove g+rx and drop sibling subu users from its group.
+ """
+ from infrastructure.unix import incommon_clear_for_subu
+ try:
+ conn = open_db()
+ except Exception as e:
+ print(f"subu: cannot open database: {e}", file =sys.stderr)
+ return 1
+ try:
+ leaf = find_by_path(conn, spec_owner, spec_parts)
+ if not leaf:
+ print("subu: specified subu not found in DB; capture or make it first", file =sys.stderr)
+ return 2
+ incommon_clear_for_subu(spec_owner, spec_parts)
+ return 0
+ finally:
+ conn.close()
+
+def device_attach(mapname: str) -> int:
+  """
+  Reconcile after your existing open+mount workflow (e.g.
+  /root/mount/device_mapname__open_mount.sh) has placed the device at /mnt/<mapname>.
+  (No mid-session home swapping here; policy enforcement to be added around callers.)
+  """
+  # We don't guess /dev/sdX here; opening and mounting stay in your wrapper.
+  # This command only verifies the mount looks right, then reconciles the DB.
+  mount_point = os.path.join("/mnt", mapname)
+  if not os.path.isdir(os.path.join(mount_point, "user_data")):
+    print(f"subu: {mount_point}/user_data not found; open and mount the device first", file =sys.stderr)
+    return 1
+  try:
+    conn = open_db()
+  except Exception as e:
+    print(f"subu: cannot open database: {e}", file =sys.stderr)
+    return 1
+  try:
+    processed = device_domain.scan_and_reconcile(conn, "/mnt")
+    print(f"scanned {processed} device(s) under /mnt")
+    return 0
+  finally:
+    conn.close()
+
+def device_detach(mapname: str) -> int:
+  """
+  Delegate unmount/close to your existing logout scripts, then mark the device
+  and its subu rows offline in the DB.
+  """
+  try:
+    conn = open_db()
+  except Exception as e:
+    print(f"subu: cannot open database: {e}", file =sys.stderr)
+    return 1
+ try:
+ # Your script already unmounts and closes; afterwards, mark offline in DB:
+ conn.execute("UPDATE device SET state='offline' WHERE mapname=?", (mapname,))
+ conn.execute(
+ "UPDATE subu_node SET is_online=0, updated_at=datetime('now') "
+ "WHERE device_id=(SELECT id FROM device WHERE mapname=?)",
+ (mapname,),
+ )
+ conn.commit()
+ print(f"device '{mapname}' marked offline")
+ return 0
+ finally:
+ conn.close()
--- /dev/null
+# domain/device.py
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+
+import os, sqlite3
+from datetime import datetime
+from pathlib import Path
+
+from domain.subu import ensure_chain
+
+def _utc_now() -> str:
+ return datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+
+def _walk_subu_paths(subu_root: Path):
+ """
+ Yield subu component lists by descending the nested subu_data tree.
+ e.g. ['developer'], ['developer','bolt'], ...
+ """
+ stack: list[tuple[Path, list[str]]] = [(subu_root, [])]
+ while stack:
+ base, prefix = stack.pop()
+ try:
+ entries = sorted(p for p in base.iterdir() if p.is_dir())
+ except FileNotFoundError:
+ continue
+ for d in entries:
+ name = d.name
+ path = prefix + [name]
+ yield path
+ nxt = d / "subu_data"
+ if nxt.is_dir():
+ stack.append((nxt, path))
+
+def _upsert_device(conn, mapname: str, mount_point: str, kind: str ="external") -> int:
+ now = _utc_now()
+ conn.row_factory = sqlite3.Row
+ row = conn.execute("SELECT id FROM device WHERE mapname=?", (mapname,)).fetchone()
+ if row:
+ dev_id = row["id"]
+ conn.execute(
+ "UPDATE device SET mount_point=?, kind=?, state='online', last_seen=? WHERE id=?",
+ (mount_point, kind, now, dev_id),
+ )
+ return int(dev_id)
+ cur = conn.execute(
+ "INSERT INTO device(mapname,mount_point,kind,state,last_seen) VALUES(?,?,?,'online',?)",
+ (mapname, mount_point, kind, now),
+ )
+ return int(cur.lastrowid)
+
+def _mark_missing_offline(conn, device_id: int, seen_keys: set[tuple[str,int]]):
+ """
+ Mark rows in subu_node for this device as offline if leaf id not seen.
+ We compare by (owner_id, node_id) but since we don’t store owner ids,
+ we key by (owner, id) indirectly via a select.
+ """
+ conn.row_factory = sqlite3.Row
+ now = _utc_now()
+ cur = conn.execute("SELECT id FROM subu_node WHERE device_id=?", (device_id,))
+ for r in cur.fetchall():
+ node_id = r["id"]
+    # seen_keys holds (device_id, node_id) pairs; device_id is fixed for this call
+ if (device_id, node_id) in seen_keys:
+ continue
+ conn.execute(
+ "UPDATE subu_node SET is_online=0, updated_at=? WHERE id=?",
+ (now, node_id),
+ )
+
+def reconcile_device(conn, mapname: str, mount_point: str) -> int:
+ """
+ Reconcile a single already-mounted device (/mnt/<mapname>).
+ Returns number of subu nodes (leaf count) discovered/refreshed.
+ """
+ user_data = Path(mount_point) / "user_data"
+ if not user_data.is_dir():
+ return 0
+
+ device_id = _upsert_device(conn, mapname, mount_point)
+ conn.row_factory = sqlite3.Row
+ now = _utc_now()
+ refreshed = 0
+ seen: set[tuple[int,int]] = set() # (device_id, node_id)
+
+ for masu_dir in sorted(p for p in user_data.iterdir() if p.is_dir()):
+ owner = masu_dir.name
+ subu_root = masu_dir / "subu_data"
+ if not subu_root.is_dir():
+ continue
+ for parts in _walk_subu_paths(subu_root):
+ # Ensure the chain exists and is marked online on this device
+ leaf = ensure_chain(conn, owner, parts, device_id, True)
+ seen.add((device_id, int(leaf["id"])))
+ refreshed += 1
+
+ _mark_missing_offline(conn, device_id, seen)
+ conn.commit()
+ return refreshed
+
+def scan_and_reconcile(conn, base_dir: str ="/mnt") -> int:
+ """
+ Scan /mnt/* for mapnames that contain a top-level user_data/ and reconcile each.
+ Returns the number of devices processed.
+ """
+ root = Path(base_dir)
+ if not root.is_dir():
+ return 0
+ processed = 0
+ for mp in sorted(p for p in root.iterdir() if p.is_dir()):
+ if not (mp / "user_data").is_dir():
+ continue
+ refreshed = reconcile_device(conn, mp.name, str(mp))
+ # Count device even if zero subu (e.g. only user_data/ present)
+ processed += 1
+ return processed
--- /dev/null
+"""
+4.5 domain/exec.py
+
+Run a command inside a subu’s namespace and UID.
+
+4.5.1 run_in_subu(subu: Subu, cmd_argv: list[str]) -> int
+"""
+def exec_in_subu(subu_id: str, cmd: list):
+ sid = int(subu_id.split("_")[1])
+ with closing(_db()) as db:
+ ns = db.execute("SELECT netns FROM subu WHERE id=?", (sid,)).fetchone()[0]
+ os.execvp("ip", ["ip","netns","exec", ns] + cmd)
--- /dev/null
+"""
+4.3 domain/network.py
+
+Netns + device wiring, including aggregate “network up/down”.
+
+4.3.1 lo_toggle(subu: Subu, state: str) -> None
+4.3.2 attach_wg(subu: Subu, wg: WG) -> None
+4.3.3 detach_wg(subu: Subu) -> None
+4.3.4 network_toggle(subu: Subu, state: str) -> None
+"""
+def network_toggle(subu_id: str, state: str):
+ sid = int(subu_id.split("_")[1])
+ with closing(_db()) as db:
+ ns, wid = db.execute("SELECT netns,wg_id FROM subu WHERE id=?", (sid,)).fetchone()
+ # always make sure lo up on 'up'
+ if state == "up":
+ run(["ip", "netns", "exec", ns, "ip", "link", "set", "lo", "up"], check=False)
+ if wid is not None:
+ ifname = f"subu_{wid}"
+ run(["ip", "-n", ns, "link", "set", "dev", ifname, state], check=False)
+ with closing(_db()) as db:
+ db.execute("UPDATE subu SET network_state=? WHERE id=?", (state, sid))
+ db.commit()
+ print(f"{subu_id}: network {state}")
+
+def _make_netns_for_subu(subu_id_num: int, netns_name: str):
+ """
+ Create the network namespace & bring lo down.
+ """
+ # ip netns add ns-subu_<id>
+ run(["ip", "netns", "add", netns_name])
+ # ip netns exec ns-subu_<id> ip link set lo down
+ run(["ip", "netns", "exec", netns_name, "ip", "link", "set", "lo", "down"])
--- /dev/null
+"""
+4.4 domain/options.py
+
+Per-subu options, backed by DB.
+
+4.4.1 set_option(subu_id: str, name: str, value: str) -> None
+4.4.2 get_option(subu_id: str, name: str) -> str | None
+4.4.3 list_options(subu_id: str) -> dict[str, str]
+"""
+def option_set(subu_id: str, name: str, value: str):
+ sid = int(subu_id.split("_")[1])
+ with closing(_db()) as db:
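+    # The upsert relies on a UNIQUE(subu_id, name) constraint on the options table.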
+ db.execute("INSERT INTO options (subu_id,name,value) VALUES(?,?,?) "
+ "ON CONFLICT(subu_id,name) DO UPDATE SET value=excluded.value",
+ (sid, name, value))
+ db.commit()
+ print("ok")
+
+def option_get(subu_id: str, name: str):
+ sid = int(subu_id.split("_")[1])
+ with closing(_db()) as db:
+ row = db.execute("SELECT value FROM options WHERE subu_id=? AND name=?", (sid,name)).fetchone()
+ print(row[0] if row else "")
+
+def option_list(subu_id: str):
+ sid = int(subu_id.split("_")[1])
+ with closing(_db()) as db:
+ rows = db.execute("SELECT name,value FROM options WHERE subu_id=?", (sid,)).fetchall()
+ for n,v in rows:
+ print(f"{n}={v}")
+
--- /dev/null
+# domain/subu.py
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+
+from infrastructure.unix import (
+ ensure_unix_user,
+ ensure_user_in_group,
+ remove_unix_user_and_group,
+ user_exists,
+)
+from typing import Iterable
+import sqlite3, datetime
+
+def _now(): return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+
+def subu_username(owner: str, parts: list[str]) -> str:
+ """
+ Build the Unix login name, enforcing 'no underscore in tokens'.
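+
+  For example, subu_username("Thomas", ["S0", "S1"]) returns "Thomas_S0_S1".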
+ """
+ owner_ok = _validate_token("masu", owner)
+ parts_ok = [_validate_token("subu", p) for p in parts]
+ return "_".join([owner_ok] + parts_ok)
+
+def ensure_chain(conn, owner: str, parts: list[str], device_id: int|None, online: bool):
+ """
+ Ensure that owner/parts[...] exists as a chain; return leaf row (dict).
+ """
+ # Validate once up-front
+ owner = _validate_token("masu", owner)
+ parts = [_validate_token("subu", p) for p in parts]
+
+ conn.row_factory = sqlite3.Row
+ parent_id = None
+ chain: list[str] = []
+ now = _now()
+ for seg in parts:
+ row = conn.execute(
+ "SELECT * FROM subu_node WHERE owner=? AND name=? AND parent_id IS ?",
+ (owner, seg, parent_id)
+ ).fetchone()
+ if row:
+ parent_id = row["id"]
+ chain.append(seg)
+ continue
+ chain.append(seg)
+ full_path = " ".join([owner] + chain)
+ full_unix = subu_username(owner, chain)
+ netns = full_unix
+ conn.execute(
+ """INSERT INTO subu_node(owner,name,parent_id,full_unix_name,full_path,netns_name,
+ device_id,is_online,created_at,updated_at)
+ VALUES(?,?,?,?,?,?,?, ?,?,?)""",
+ (owner, seg, parent_id, full_unix, full_path, netns,
+ device_id, 1 if online else 0, now, now)
+ )
+ parent_id = conn.execute("SELECT last_insert_rowid() id").fetchone()["id"]
+ leaf = conn.execute("SELECT * FROM subu_node WHERE id=?", (parent_id,)).fetchone()
+ return dict(leaf)
+
+def find_by_path(conn, owner: str, parts: list[str]):
+  conn.row_factory = sqlite3.Row
+  parent_id = None
+  row = None
+  for seg in parts:
+    row = conn.execute(
+      "SELECT * FROM subu_node WHERE owner=? AND name=? AND parent_id IS ?",
+      (owner, seg, parent_id)
+    ).fetchone()
+    if not row:
+      return None
+    parent_id = row["id"]
+  return dict(row) if row else None
+
+def list_children(conn, node_id: int|None, owner: str):
+ """
+ node_id=None lists top-level subu of owner; otherwise children of node_id.
+ """
+ conn.row_factory = sqlite3.Row
+ if node_id is None:
+ cur = conn.execute("SELECT * FROM subu_node WHERE owner=? AND parent_id IS NULL ORDER BY name", (owner,))
+ else:
+ cur = conn.execute("SELECT * FROM subu_node WHERE owner=? AND parent_id=? ORDER BY name", (owner, node_id))
+ return [dict(r) for r in cur.fetchall()]
+
+def _validate_token(label: str, token: str) -> str:
+ """
+ Validate a single path token (masu or subu).
+
+ Rules:
+ - must be non-empty after stripping whitespace
+ - must not contain underscore '_'
+ """
+ token_stripped = token.strip()
+ if not token_stripped:
+ raise SystemExit(f"subu: {label} name must be non-empty")
+ if "_" in token_stripped:
+ raise SystemExit(
+ f"subu: {label} name '{token_stripped}' must not contain underscore '_'"
+ )
+ # dashes are fine; acronyms and proper nouns are fine.
+ return token_stripped
+
+
+def _parent_username(masu: str, path_components: list[str]) -> str | None:
+ """
+ Return the Unix username of the parent subu, or None if this is top-level.
+
+ Examples:
+ masu="Thomas", path=["S0"] -> None (parent is just the masu)
+ masu="Thomas", path=["S0","S1"] -> "Thomas_S0"
+ """
+ if len(path_components) <= 1:
+ return None
+ # parent path is everything except last token
+ parent_path = path_components[:-1]
+ return subu_username(masu, parent_path)
+
+
+def _ancestor_group_names(masu: str, path_components: list[str]) -> list[str]:
+ """
+ Compute ancestor groups that a subu must join for directory traversal.
+
+ For path:
+ [masu, s1, s2, ..., sk]
+
+ we return:
+ [masu,
+ masu_s1,
+ masu_s1_s2,
+ ...,
+ masu_s1_..._s{k-1}]
+
+ The last element (full username) is NOT included, because that is
+ the subu's own primary group.
+ """
+ groups: list[str] = []
+ # masu group (allows traversal of /home/masu and /home/masu/subu_data)
+ groups.append(_validate_token("masu", masu))
+
+ # For deeper subu, add each ancestor subu's group
+ for depth in range(1, len(path_components)):
+ prefix = path_components[:depth]
+ groups.append(subu_username(masu, prefix))
+
+ return groups
+
+
+def make_subu(masu: str, path_components: list[str]) -> str:
+ """
+ Make the Unix user and group for this subu.
+
+ The subu path is:
+ masu subu subu ...
+
+ Rules:
+ - len(path_components) >= 1
+ - tokens must not contain '_'
+ - parent must exist:
+ * for first-level subu: Unix user 'masu' must exist
+ * for deeper subu: parent subu unix user must exist
+
+ Returns:
+ Unix username, for example 'Thomas_S0' or 'Thomas_S0_S1'.
+ """
+ if not path_components:
+ raise SystemExit("subu: make requires at least one subu component")
+
+ # Normalize and validate tokens (this will raise SystemExit on error).
+ # subu_username will call _validate_token internally.
+ username = subu_username(masu, path_components)
+
+ # Enforce parent existence
+ parent_uname = _parent_username(masu, path_components)
+ if parent_uname is None:
+ # Top-level subu: require the masu Unix user to exist
+ masu_name = _validate_token("masu", masu)
+ if not user_exists(masu_name):
+ raise SystemExit(
+ f"subu: cannot make '{username}': "
+ f"masu Unix user '{masu_name}' does not exist"
+ )
+ else:
+ # Deeper subu: require parent subu Unix user to exist
+ if not user_exists(parent_uname):
+ raise SystemExit(
+ f"subu: cannot make '{username}': "
+ f"parent subu unix user '{parent_uname}' does not exist"
+ )
+
+ # For now, group and user share the same name.
+ ensure_unix_user(username, username)
+
+ # Add this subu to the ancestor groups so that directory traversal works:
+ # /home/masu
+ # /home/masu/subu_data
+ # /home/masu/subu_data/<parent>/subu_data/...
+ ancestor_groups = _ancestor_group_names(masu, path_components)
+ for gname in ancestor_groups:
+ ensure_user_in_group(username, gname)
+
+ return username
+
+def remove_subu(masu: str, path_components: list[str]) -> str:
+ """
+ Remove the Unix user and group for this subu, if they exist.
+
+ The subu path is:
+ masu subu subu ...
+
+ Returns:
+ Unix username that was targeted.
+ """
+ if not path_components:
+ raise SystemExit("subu: remove requires at least one subu component")
+
+ username = subu_username(masu, path_components)
+ remove_unix_user_and_group(username)
+ return username
--- /dev/null
+"""
+4.2 domain/wg.py
+
+WireGuard objects, independent of subu.
+
+4.2.1 set_global_pool(base_cidr: str) -> None
+4.2.2 make_wg(endpoint: str) -> WG
+4.2.3 set_server_public_key(wg_id: str, key: str) -> None
+4.2.4 get_wg(wg_id: str) -> WG
+4.2.5 bring_up(wg_id: str) -> None
+4.2.6 bring_down(wg_id: str) -> None
+"""
+
+def wg_global(basecidr: str):
+ WG_GLOBAL_FILE.write_text(basecidr.strip()+"\n")
+ print(f"WG pool base = {basecidr}")
+
+def _alloc_ip(idx: int, base: str) -> str:
+ # simplistic /24 allocator: base must be x.y.z.0/24
+ prefix = base.split("/")[0].rsplit(".", 1)[0]
+ host = 2 + idx
+ return f"{prefix}.{host}/32"
+
+def wg_make(endpoint: str) -> str:
+ if not WG_GLOBAL_FILE.exists():
+ raise RuntimeError("set WG base with `subu WG global <CIDR>` first")
+ base = WG_GLOBAL_FILE.read_text().strip()
+ with closing(_db()) as db:
+ c = db.cursor()
+ idx = c.execute("SELECT COUNT(*) FROM wg").fetchone()[0]
+ local_ip = _alloc_ip(idx, base)
+ c.execute("INSERT INTO wg (endpoint, local_ip, allowed_ips) VALUES (?, ?, ?)",
+ (endpoint, local_ip, "0.0.0.0/0"))
+ wid = c.lastrowid
+ db.commit()
+ print(f"WG_{wid} endpoint={endpoint} ip={local_ip}")
+ return f"WG_{wid}"
+
+def wg_set_pubkey(wg_id: str, key: str):
+ wid = int(wg_id.split("_")[1])
+ with closing(_db()) as db:
+ db.execute("UPDATE wg SET pubkey=? WHERE id=?", (key, wid))
+ db.commit()
+ print("ok")
+
+def wg_info(wg_id: str):
+ wid = int(wg_id.split("_")[1])
+ with closing(_db()) as db:
+ row = db.execute("SELECT * FROM wg WHERE id=?", (wid,)).fetchone()
+ print(row if row else "not found")
+
+def wg_up(wg_id: str):
+ wid = int(wg_id.split("_")[1])
+ # Admin-up of WG device handled via network_toggle once attached.
+ print(f"{wg_id}: up (noop until attached)")
+
+def wg_down(wg_id: str):
+ wid = int(wg_id.split("_")[1])
+ print(f"{wg_id}: down (noop until attached)")
+
--- /dev/null
+# env.py
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+
+from pathlib import Path
+
+
+def version() -> str:
+ """
+ Software / CLI version.
+ """
+ return "0.3.5"
+
+
+def db_schema_version() -> str:
+ """
+ Database schema version (used in the DB filename).
+
+ Bump this only when the DB layout/semantics change,
+ not for every CLI code change.
+ """
+ return "0.1"
+
+
+def db_root_dir() -> Path:
+ """
+ Root directory for the manager database.
+ """
+ return Path("/opt/subu")
+
+
+def db_filename() -> str:
+ """
+ Filename of the SQLite database, relative to db_root_dir.
+ """
+ return f"subu_{db_schema_version()}.sqlite3"
+
+
+def db_path() -> str:
+ """
+ Full path to the SQLite database file.
+
+ Currently this is:
+
+ /opt/subu/subu_<schema>.sqlite3
+
+ There is deliberately no environment override here; this path
+ defines the canonical system-wide DB used by all manager invocations.
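+
+    With the defaults above this resolves to /opt/subu/subu_0.1.sqlite3.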
+ """
+ return str(db_root_dir() / db_filename())
--- /dev/null
+"""
+bpf.py
+
+Compile/load the BPF program.
+
+5.3.1 compile_bpf(source_path: str, output_path: str) -> None
+5.3.2 load_bpf(obj_path: str) -> BpfHandle
+
+The attach_wg / detach_wg helpers below wire a WG interface into a subu's netns
+and install/remove the cgroup BPF steering via bpf_worker.
+"""
+
+from contextlib import closing
+from bpf_worker import ensure_mounts, install_steering, remove_steering, BpfError, run
+# NOTE: _db() is assumed to be provided elsewhere in the package (not shown in this diff).
+
+def attach_wg(subu_id: str, wg_id: str):
+ ensure_mounts()
+ sid = int(subu_id.split("_")[1]); wid = int(wg_id.split("_")[1])
+ with closing(_db()) as db:
+ r = db.execute("SELECT netns FROM subu WHERE id=?", (sid,)).fetchone()
+ if not r: raise ValueError("subu not found")
+ ns = r[0]
+ w = db.execute("SELECT endpoint, local_ip, pubkey FROM wg WHERE id=?", (wid,)).fetchone()
+ if not w: raise ValueError("WG not found")
+ endpoint, local_ip, pubkey = w
+
+ ifname = f"subu_{wid}"
+ # make WG link in init ns, move to netns
+ run(["ip", "link", "add", ifname, "type", "wireguard"])
+ run(["ip", "link", "set", ifname, "netns", ns])
+ run(["ip", "-n", ns, "addr", "add", local_ip, "dev", ifname], check=False)
+ run(["ip", "-n", ns, "link", "set", "dev", ifname, "mtu", "1420"])
+ run(["ip", "-n", ns, "link", "set", "dev", ifname, "down"]) # keep engine down until `network up`
+
+ # install steering (MVP: make cgroup + attach bpf program)
+ try:
+ install_steering(subu_id, ns, ifname)
+ print(f"{subu_id}: eBPF steering installed -> {ifname}")
+ except BpfError as e:
+ print(f"{subu_id}: steering warning: {e}")
+
+ with closing(_db()) as db:
+ db.execute("UPDATE subu SET wg_id=? WHERE id=?", (wid, sid))
+ db.commit()
+ print(f"attached {wg_id} to {subu_id} in {ns} as {ifname}")
+
+def detach_wg(subu_id: str):
+ ensure_mounts()
+ sid = int(subu_id.split("_")[1])
+ with closing(_db()) as db:
+ r = db.execute("SELECT netns,wg_id FROM subu WHERE id=?", (sid,)).fetchone()
+ if not r: print("not found"); return
+ ns, wid = r
+ if wid is None:
+ print("nothing attached"); return
+ ifname = f"subu_{wid}"
+ run(["ip", "-n", ns, "link", "del", ifname], check=False)
+ try:
+ remove_steering(subu_id)
+ except BpfError as e:
+ print(f"steering remove warn: {e}")
+ with closing(_db()) as db:
+ db.execute("UPDATE subu SET wg_id=NULL WHERE id=?", (sid,))
+ db.commit()
+ print(f"detached WG_{wid} from {subu_id}")
+
--- /dev/null
+// -*- mode: c; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 8 -*-
+// bpf_force_egress.c — MVP scaffold to validate UID and prep metadata
+/*
+ bpf_force_egress.c
+
+5.5.1 no callable Python API; compiled/used via bpf.py.
+*/
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+
+char LICENSE[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, __u32); // tgid
+ __type(value, __u32); // reserved (target ifindex placeholder)
+ __uint(max_entries, 1024);
+} subu_tgid2if SEC(".maps");
+
+// Helper: return 0 = allow, <0 reject
+static __always_inline int allow_uid(struct bpf_sock_addr *ctx) {
+ // MVP: just accept everyone; you can gate on UID 2017 with bpf_get_current_uid_gid()
+ // __u32 uid = (__u32)(bpf_get_current_uid_gid() & 0xffffffff);
+ // if (uid != 2017) return -1;
+ return 0;
+}
+
+// Hook: cgroup/connect4 — runs before connect(2) proceeds
+SEC("cgroup/connect4")
+int subu_connect4(struct bpf_sock_addr *ctx)
+{
+  if (allow_uid(ctx) < 0) return 0;  /* cgroup sock_addr verdict: 0 = reject */
+  // Future: read pinned map/meta, set SO_* via bpf_setsockopt when permitted
+  return 1;                          /* 1 = allow */
+}
+
+// Hook: cgroup/post_bind4 — runs after a local bind is chosen
+SEC("cgroup/post_bind4")
+int subu_post_bind4(struct bpf_sock *sk)
+{
+ // Future: enforce bound dev if kernel helper allows; record tgid->ifindex
+ __u32 tgid = bpf_get_current_pid_tgid() >> 32;
+ __u32 val = 0;
+ bpf_map_update_elem(&subu_tgid2if, &tgid, &val, BPF_ANY);
+  return 1;  /* cgroup sock verdict: 1 = allow */
+}
--- /dev/null
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+"""
+bpf_worker.py
+
+Cgroup + BPF orchestration for per-subu steering.
+
+5.4.1 ensure_mounts() -> None
+5.4.2 install_steering(subu: Subu, wg_iface: str) -> None
+5.4.3 remove_steering(subu: Subu) -> None
+5.4.4 class BpfError(Exception)
+"""
+import os, subprocess, json
+from pathlib import Path
+
+class BpfError(RuntimeError): pass
+
+def run(cmd, check=True):
+ r = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+ if check and r.returncode != 0:
+ raise BpfError(f"cmd failed: {' '.join(cmd)}\n{r.stderr}")
+ return r.stdout.strip()
+
+def ensure_mounts():
+ # ensure bpf and cgroup v2 are mounted
+ try:
+ Path("/sys/fs/bpf").mkdir(parents=True, exist_ok=True)
+ run(["mount","-t","bpf","bpf","/sys/fs/bpf"], check=False)
+ except Exception:
+ pass
+ try:
+ Path("/sys/fs/cgroup").mkdir(parents=True, exist_ok=True)
+ run(["mount","-t","cgroup2","none","/sys/fs/cgroup"], check=False)
+ except Exception:
+ pass
+
+def cgroup_path(subu_id: str) -> str:
+ return f"/sys/fs/cgroup/{subu_id}"
+
+def install_steering(subu_id: str, netns: str, ifname: str):
+ ensure_mounts()
+ cg = Path(cgroup_path(subu_id))
+ cg.mkdir(parents=True, exist_ok=True)
+
+ # compile BPF
+ obj = Path("./bpf_force_egress.o")
+ src = Path("./bpf_force_egress.c")
+ if not src.exists():
+ raise BpfError("bpf_force_egress.c missing next to manager")
+
+ # Build object (requires clang/llc/bpftool)
+ run(["clang","-O2","-g","-target","bpf","-c",str(src),"-o",str(obj)])
+
+ # Load program into bpffs; attach to cgroup/inet4_connect + inet4_post_bind (MVP)
+ pinned = f"/sys/fs/bpf/{subu_id}_egress"
+ run(["bpftool","prog","loadall",str(obj),pinned], check=True)
+
+  # Attach to hooks (MVP validation hooks)
+  # NOTE: these are safe no-ops for now; they validate UID and stash ifindex map.
+  # bpftool pins each program by its name under the loadall path, and its
+  # cgroup attach types use the short names (connect4, post_bind4).
+  for hook, prog_name in (("connect4", "subu_connect4"), ("post_bind4", "subu_post_bind4")):
+    run(["bpftool","cgroup","attach",cgroup_path(subu_id),hook,"pinned",f"{pinned}/{prog_name}"], check=False)
+
+  # Write metadata for ifname (saved for future prog versions).  bpffs does not
+  # accept regular files, so keep this under /run instead of /sys/fs/bpf.
+  meta = {"ifname": ifname}
+  Path(f"/run/subu_{subu_id}_meta.json").write_text(json.dumps(meta))
+
+def remove_steering(subu_id: str):
+  cg = cgroup_path(subu_id)
+  pinned = Path(f"/sys/fs/bpf/{subu_id}_egress")
+  # Detach whatever is attached; bpftool wants the attach type plus the pinned program
+  for hook, prog_name in (("connect4", "subu_connect4"), ("post_bind4", "subu_post_bind4")):
+    subprocess.run(["bpftool","cgroup","detach",cg,hook,"pinned",str(pinned / prog_name)],
+                   stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+  # Unpin by removing the pin files, then the directory and the metadata file
+  try:
+    for p in pinned.glob("*"):
+      p.unlink()
+    pinned.rmdir()
+    Path(f"/run/subu_{subu_id}_meta.json").unlink(missing_ok=True)
+  except Exception:
+    pass
+ # Remove cgroup dir
+ try:
+ Path(cg).rmdir()
+ except Exception:
+ pass
--- /dev/null
+# infrastructure/db.py
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+
+import sqlite3
+from pathlib import Path
+import env
+
+
+def schema_path_default():
+ """
+ Path to schema.sql, assumed to live next to this file.
+ """
+ return Path(__file__).with_name("schema.sql")
+
+
+def open_db(path=None):
+ """
+ Return a sqlite3.Connection with sensible pragmas.
+ Caller is responsible for closing.
+
+ If path is None, the canonical manager DB path from env.db_path()
+ is used. The parent directory is created if it does not exist.
+ """
+ if path is None:
+ path = env.db_path()
+
+ path_obj = Path(path)
+ parent = path_obj.parent
+
+ try:
+ parent.mkdir(parents=True, exist_ok=True)
+ except PermissionError as e:
+ raise RuntimeError(f"cannot create DB directory '{parent}': {e}") from e
+
+ conn = sqlite3.connect(str(path_obj))
+ conn.row_factory = sqlite3.Row
+ conn.execute("PRAGMA foreign_keys = ON")
+ conn.execute("PRAGMA journal_mode = WAL")
+ conn.execute("PRAGMA synchronous = NORMAL")
+ return conn
+
+
+def ensure_schema(conn):
+ """
+ Ensure the schema in schema.sql is applied.
+ This is idempotent: executing the DDL again is acceptable.
+ """
+ sql = schema_path_default().read_text(encoding="utf-8")
+ conn.executescript(sql)
+ conn.commit()
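+
+# Typical usage (e.g. when handling 'db load schema'):
+#   conn = open_db()
+#   try:
+#     ensure_schema(conn)
+#   finally:
+#     conn.close()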
--- /dev/null
+# domain/device.py
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+
+"""
+Device-aware reconciliation of subu state.
+
+This module assumes:
+ * Devices with user data are mounted as: /mnt/<mapname>
+ * On each device, user data lives under: /mnt/<mapname>/user_data/<masu>
+ * Subu home directories follow the pattern:
+
+ /mnt/<mapname>/user_data/<masu>/subu_data/<subu0>/subu_data/<subu1>/...
+
+ i.e., each subu directory may contain a 'subu_data' directory for children.
+
+Given an open SQLite connection, scan_and_reconcile() will:
+
+ * Discover all devices under a base directory (default: /mnt)
+ * For each device that has 'user_data':
+ - Upsert a row in the 'device' table.
+ - Discover all subu paths for all masus on that device.
+ - Upsert/refresh rows in 'subu' with device_id + is_online=1.
+ - Mark any previously-known subu on that device that are not seen
+ in the current scan as is_online=0.
+"""
+
+import os
+from datetime import datetime
+from pathlib import Path
+
+from domain.subu import subu_username
+
+
+def _utc_now() -> str:
+ """
+ Return a UTC timestamp string suitable for created_at/updated_at/last_seen.
+ Example: '2025-11-11T05:30:12Z'
+ """
+ return datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
+
+
+def _walk_subu_paths(subu_root: Path):
+ """
+ Yield all subu paths under a root 'subu_data' directory.
+
+ Layout assumption:
+
+ subu_root/
+ S0/
+ ...files...
+ subu_data/
+ S1/
+ ...
+ subu_data/
+ S2/
+ ...
+
+ For each logical path:
+ ['S0'] (top-level)
+ ['S0','S1'] (child)
+ ['S0','S1','S2'] (grand-child)
+ ...
+
+ we yield the list of path components.
+ """
+ stack: list[tuple[Path, list[str]]] = [(subu_root, [])]
+
+ while stack:
+ current_root, prefix = stack.pop()
+ try:
+ entries = sorted(current_root.iterdir(), key =lambda p: p.name)
+ except FileNotFoundError:
+ continue
+
+ for entry in entries:
+ if not entry.is_dir():
+ continue
+ name = entry.name
+ path_components = prefix + [name]
+ yield path_components
+
+ child_subu_data = entry / "subu_data"
+ if child_subu_data.is_dir():
+ stack.append((child_subu_data, path_components))
+
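+# For instance, a tree 'developer/subu_data/bolt' under subu_root makes the walker
+# above yield ['developer'] and then ['developer', 'bolt'].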
+
+def _upsert_device(
+ conn,
+ mapname: str,
+ mount_point: str,
+ kind: str ="external",
+) -> int:
+ """
+ Ensure a row exists for this device and return its id.
+
+ We do NOT try to discover fs_uuid/luks_uuid here; those can be filled
+ in later if desired.
+ """
+ now = _utc_now()
+
+ cur = conn.execute(
+ "SELECT id FROM device WHERE mapname = ?",
+ (mapname,),
+ )
+ row = cur.fetchone()
+
+ if row:
+ device_id = row["id"]
+ conn.execute(
+ """
+ UPDATE device
+ SET mount_point = ?,
+ kind = ?,
+ state = 'online',
+ last_seen = ?
+ WHERE id = ?
+ """,
+ (mount_point, kind, now, device_id),
+ )
+ else:
+ cur = conn.execute(
+ """
+ INSERT INTO device (mapname, mount_point, kind, state, last_seen)
+ VALUES (?, ?, ?, 'online', ?)
+ """,
+ (mapname, mount_point, kind, now),
+ )
+ device_id = cur.lastrowid
+
+ return int(device_id)
+
+
+def _ensure_subu_row(
+ conn,
+ device_id: int,
+ owner: str,
+ subu_path_components: list[str],
+ full_path_str: str,
+ now: str,
+):
+ """
+ Upsert a row in 'subu' for (owner, subu_path_components) on device_id.
+
+ full_path_str is the human-readable path, e.g. 'Thomas local' or
+ 'Thomas developer bolt'.
+ """
+ if not subu_path_components:
+ return
+
+ leaf_name = subu_path_components[-1]
+ full_unix_name = subu_username(owner, subu_path_components)
+
+ # For now, we simply reuse full_unix_name as netns_name.
+ netns_name = full_unix_name
+
+ # See if a row already exists for this owner + path.
+ cur = conn.execute(
+ "SELECT id FROM subu WHERE owner = ? AND path = ?",
+ (owner, full_path_str),
+ )
+ row = cur.fetchone()
+
+ if row:
+ subu_id = row["id"]
+ conn.execute(
+ """
+ UPDATE subu
+ SET device_id = ?,
+ is_online = 1,
+ updated_at = ?
+ WHERE id = ?
+ """,
+ (device_id, now, subu_id),
+ )
+ return
+
+ # Insert new row
+ conn.execute(
+ """
+ INSERT INTO subu (
+ owner,
+ name,
+ full_unix_name,
+ path,
+ netns_name,
+ wg_id,
+ device_id,
+ is_online,
+ created_at,
+ updated_at
+ )
+ VALUES (?, ?, ?, ?, ?, NULL, ?, 1, ?, ?)
+ """,
+ (
+ owner,
+ leaf_name,
+ full_unix_name,
+ full_path_str,
+ netns_name,
+ device_id,
+ now,
+ now,
+ ),
+ )
+
+
+def _reconcile_device_for_mount(conn, device_id: int, user_data_dir: Path):
+ """
+ Reconcile all subu on a particular device.
+
+ user_data_dir is a path like:
+
+ /mnt/Eagle/user_data
+
+ Under which we expect:
+
+ /mnt/Eagle/user_data/<masu>/subu_data/...
+ """
+ now = _utc_now()
+ discovered: set[tuple[str, str]] = set()
+
+ try:
+ owners = sorted(user_data_dir.iterdir(), key =lambda p: p.name)
+ except FileNotFoundError:
+ return
+
+ for owner_entry in owners:
+ if not owner_entry.is_dir():
+ continue
+
+ owner = owner_entry.name
+ subu_root = owner_entry / "subu_data"
+ if not subu_root.is_dir():
+ # masu with no subu_data; skip
+ continue
+
+ for subu_components in _walk_subu_paths(subu_root):
+ # Full logical path is: [owner] + subu_components
+ path_tokens = [owner] + subu_components
+ path_str = " ".join(path_tokens)
+ discovered.add((owner, path_str))
+
+ _ensure_subu_row(
+ conn =conn,
+ device_id =device_id,
+ owner =owner,
+ subu_path_components =subu_components,
+ full_path_str =path_str,
+ now =now,
+ )
+
+ # Mark any existing subu on this device that we did NOT see as offline.
+ cur = conn.execute(
+ "SELECT id, owner, path FROM subu WHERE device_id = ?",
+ (device_id,),
+ )
+ existing = cur.fetchall()
+ for row in existing:
+ key = (row["owner"], row["path"])
+ if key in discovered:
+ continue
+ conn.execute(
+ """
+ UPDATE subu
+ SET is_online = 0,
+ updated_at = ?
+ WHERE id = ?
+ """,
+ (now, row["id"]),
+ )
+
+
+def scan_and_reconcile(conn, base_dir: str ="/mnt") -> int:
+ """
+ Scan all mounted devices under base_dir for 'user_data' trees and
+ reconcile them into the database.
+
+ For each directory 'base_dir/<mapname>':
+
+ * If it contains 'user_data', it is treated as a device.
+ * A 'device' row is upserted (mapname = basename).
+ * All subu under the corresponding user_data tree are reconciled.
+
+ Returns:
+ Number of devices that were processed.
+ """
+ root = Path(base_dir)
+ if not root.is_dir():
+ return 0
+
+ processed = 0
+
+ for entry in sorted(root.iterdir(), key =lambda p: p.name):
+ if not entry.is_dir():
+ continue
+
+ mapname = entry.name
+ user_data_dir = entry / "user_data"
+ if not user_data_dir.is_dir():
+ continue
+
+ mount_point = str(entry)
+ device_id = _upsert_device(conn, mapname, mount_point)
+ _reconcile_device_for_mount(conn, device_id, user_data_dir)
+ processed += 1
+
+ conn.commit()
+ return processed
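+
+# Example, as a sketch (assumes an open connection from infrastructure.db and a device
+# mounted at /mnt/Eagle containing user_data/Thomas/subu_data/developer):
+#
+#   from infrastructure.db import open_db
+#   conn = open_db()
+#   n = scan_and_reconcile(conn, base_dir="/mnt")  # upserts device 'Eagle', subu 'Thomas developer'
+#   conn.close()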
--- /dev/null
+# infrastructure/options_store.py
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+
+from pathlib import Path
+
+# Options file: the relative path resolves against the current working directory, which
+# is expected to be the manager release tree (next to the CLI). The dev tree uses the
+# same relative layout.
+OPTIONS_FILE = Path("subu.options")
+
+
+def load_options():
+ """
+ Load options from subu.options into a dictionary.
+
+ Lines are of the form: key=value
+ Lines starting with '#' or blank lines are ignored.
+ """
+ opts = {}
+ if not OPTIONS_FILE.exists():
+ return opts
+ text = OPTIONS_FILE.read_text(encoding="utf-8")
+ for line in text.splitlines():
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+ if "=" not in line:
+ continue
+ k, v = line.split("=", 1)
+ opts[k.strip()] = v.strip()
+ return opts
+
+
+def save_options(opts: dict):
+ """
+ Save a dictionary of options back to subu.options.
+ """
+ lines = []
+ for k in sorted(opts.keys()):
+ v = opts[k]
+ lines.append(f"{k}={v}\n")
+ OPTIONS_FILE.write_text("".join(lines), encoding="utf-8")
+
+
+def set_option(name: str, value: str):
+ """
+ Set a single option key to a value.
+ """
+ opts = load_options()
+ opts[name] = value
+ save_options(opts)
+
+
+def get_option(name: str, default=None):
+ """
+ Get an option value by name, or default if missing.
+ """
+ opts = load_options()
+ return opts.get(name, default)
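+
+# Example subu.options content (illustrative; 'incommon.<masu>' is the key form written
+# by 'subu option set incommon'):
+#
+#   incommon.Thomas=subu_3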
--- /dev/null
+-- schema.sql
+--
+-- Schema for subu manager, including device-aware subu tracking.
+
+-- Devices that can hold one or more masu homes.
+-- Each row represents a physical (or logical) storage volume
+-- identified by a mapname like 'Eagle' and optionally by UUIDs.
+CREATE TABLE IF NOT EXISTS device (
+ id INTEGER PRIMARY KEY,
+ mapname TEXT NOT NULL UNIQUE, -- e.g. 'Eagle'
+ fs_uuid TEXT, -- filesystem UUID (optional)
+ luks_uuid TEXT, -- LUKS UUID (optional)
+ mount_point TEXT NOT NULL, -- e.g. '/mnt/Eagle'
+ kind TEXT NOT NULL DEFAULT 'external', -- 'local','external','encrypted',...
+ state TEXT NOT NULL DEFAULT 'offline', -- 'online','offline','error'
+ last_seen TEXT NOT NULL -- ISO8601 UTC timestamp
+);
+
+-- parents via parent_id; one row per node in the tree
+CREATE TABLE IF NOT EXISTS subu_node (
+ id INTEGER PRIMARY KEY,
+ owner TEXT NOT NULL, -- masu
+ name TEXT NOT NULL, -- this segment (e.g., developer, bolt)
+ parent_id INTEGER, -- NULL for top-level subu under owner
+ full_unix_name TEXT NOT NULL UNIQUE, -- e.g., Thomas_developer_bolt
+ full_path TEXT NOT NULL, -- e.g., "Thomas developer bolt"
+ netns_name TEXT NOT NULL, -- default = full_unix_name
+ device_id INTEGER, -- NULL=local
+ is_online INTEGER NOT NULL DEFAULT 1,
+ created_at TEXT NOT NULL,
+ updated_at TEXT NOT NULL,
+ FOREIGN KEY(parent_id) REFERENCES subu_node(id),
+ FOREIGN KEY(device_id) REFERENCES device(id),
+ UNIQUE(owner, name, parent_id) -- no duplicate siblings
+);
+
+CREATE INDEX IF NOT EXISTS idx_node_owner_parent ON subu_node(owner, parent_id);
+CREATE INDEX IF NOT EXISTS idx_node_device ON subu_node(device_id);
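+
+-- Example rows (illustrative values) for the path "Thomas developer bolt":
+--
+--   id | owner  | name      | parent_id | full_unix_name        | full_path
+--   ---+--------+-----------+-----------+-----------------------+-----------------------
+--    1 | Thomas | developer | NULL      | Thomas_developer      | Thomas developer
+--    2 | Thomas | bolt      | 1         | Thomas_developer_bolt | Thomas developer bolt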
+
--- /dev/null
+# infrastructure/unix.py
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+
+import os, subprocess, pwd, grp
+
+def _run(cmd: list[str]) -> int:
+ return subprocess.run(cmd, check =False).returncode
+
+def user_exists(name: str) -> bool:
+ try:
+ pwd.getpwnam(name); return True
+ except KeyError:
+ return False
+
+def group_exists(name: str) -> bool:
+ try:
+ grp.getgrnam(name); return True
+ except KeyError:
+ return False
+
+def ensure_group(name: str) -> None:
+ if group_exists(name): return
+ _run(["groupadd", "--force", name])
+
+def ensure_unix_user(user: str, primary_group: str) -> None:
+ """
+ Ensure Unix user and primary group exist with matching names.
+ """
+ ensure_group(primary_group)
+ if user_exists(user): return
+  # Create the account with a home directory; separate tooling manages the home contents.
+ _run([
+ "useradd",
+ "--create-home", # harmless if home already bind-mounted later
+ "--shell", "/bin/bash",
+ "--gid", primary_group,
+ user,
+ ])
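+
+# For example, ensure_unix_user("Thomas_developer", "Thomas_developer") issues at most:
+#   groupadd --force Thomas_developer
+#   useradd --create-home --shell /bin/bash --gid Thomas_developer Thomas_developer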
+
+def ensure_user_in_group(user: str, group: str) -> None:
+ ensure_group(group)
+ # usermod -a -G keeps existing supplementary groups
+ _run(["usermod", "-a", "-G", group, user])
+
+def remove_unix_user_and_group(user: str) -> None:
+ # Remove user, then drop group if empty
+ _run(["userdel", "-r", user])
+ if group_exists(user):
+ _run(["groupdel", user])
+
+def incommon_set_for_subu(masu: str, parts: list[str]) -> None:
+ """
+ Grant g+rx on the subu home dir and add all sibling subu users
+ under the same owner into this subu's group.
+ """
+ # Compute Unix names
+ owner_group = masu
+ subu_user = "_".join([masu] + parts)
+ subu_group = subu_user
+ # Directory path (owner’s subu_data path)
+ if not parts:
+ return
+ home = f"/home/{masu}/subu_data"
+ for seg in parts[:-1]:
+ home = f"{home}/{seg}/subu_data"
+ home = f"{home}/{parts[-1]}"
+ # chmod g+rx on the incommon subu home
+ _run(["chmod", "g+rx", home])
+ # Add all other subu under the owner into this group
+ # (simple, local discovery; DB-driven selection is also possible)
+  base = f"/home/{masu}/subu_data"
+  if not os.path.isdir(base):
+    return
+  for entry in sorted(os.listdir(base)):
+ # first-level siblings only; deeper policies can be added later
+ u = f"{masu}_{entry}"
+ if u == subu_user: # skip self
+ continue
+ if user_exists(u):
+ ensure_user_in_group(u, subu_group)
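+
+# For example (a sketch, assuming masu 'Thomas' with first-level subu 'developer' and
+# 'bolt'), incommon_set_for_subu("Thomas", ["developer"]) amounts to:
+#   chmod g+rx /home/Thomas/subu_data/developer
+#   usermod -a -G Thomas_developer Thomas_bolt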
+
+def incommon_clear_for_subu(masu: str, parts: list[str]) -> None:
+ """
+ Revoke g+rx (set back to 700) and drop sibling subu from the group.
+ """
+ if not parts:
+ return
+ subu_user = "_".join([masu] + parts)
+ subu_group = subu_user
+ home = f"/home/{masu}/subu_data"
+ for seg in parts[:-1]:
+ home = f"{home}/{seg}/subu_data"
+ home = f"{home}/{parts[-1]}"
+ _run(["chmod", "0700", home])
+ # Remove siblings from the group
+  base = f"/home/{masu}/subu_data"
+  if not os.path.isdir(base):
+    return
+  for entry in sorted(os.listdir(base)):
+ u = f"{masu}_{entry}"
+ if u == subu_user: # skip self
+ continue
+ if user_exists(u):
+ _run(["gpasswd", "-d", u, subu_group])
+
+def mark_device_offline(mapname: str) -> None:
+ # reserved for later; actual DB write is done in dispatch.device_detach
+ pass
+
--- /dev/null
+# text.py
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+"""
+text.py — user-facing text for the subu manager CLI.
+"""
+
+class _Text:
+ def __init__(self, program_name: str):
+ self.program_name = program_name
+ self._version = "0.3.4"
+
+ # ---- Public API expected by CLI.py ---------------------------------------
+
+ def version(self) -> str:
+ return f"{self._version}\n"
+
+ def usage(self) -> str:
+ p = self.program_name
+ v = self._version
+ return (
+ f"{p} — Subu manager (v{v})\n"
+ "\n"
+ "Usage:\n"
+ f" {p} # usage\n"
+ f" {p} help # detailed help\n"
+ f" {p} example # example workflow\n"
+ f" {p} version # print version\n"
+ "\n"
+ f" {p} db load schema\n"
+ f" {p} db migrate subu->subu_node\n"
+ "\n"
+ f" {p} device scan [--base-dir DIR]\n"
+ f" {p} device attach <mapname>\n"
+ f" {p} device detach <mapname>\n"
+ "\n"
+ f" {p} subu make <masu> <subu> [<subu> ...]\n"
+ f" {p} subu capture <masu> <subu> [<subu> ...]\n"
+ f" {p} subu list\n"
+ f" {p} subu info subu_<id>\n"
+ f" {p} subu info <masu> <subu> [<subu> ...]\n"
+ f" {p} subu remove subu_<id>\n"
+ f" {p} subu remove <masu> <subu> [<subu> ...]\n"
+ f" {p} subu option set incommon subu_<id>\n"
+ f" {p} subu option set incommon <masu> <subu> [<subu> ...]\n"
+ f" {p} subu option clear incommon subu_<id>\n"
+ f" {p} subu option clear incommon <masu> <subu> [<subu> ...]\n"
+ "\n"
+ f" {p} lo up|down <Subu_ID>\n"
+ "\n"
+ f" {p} WG global <BaseCIDR>\n"
+ f" {p} WG make <host:port>\n"
+ f" {p} WG server_provided_public_key <WG_ID> <Base64Key>\n"
+ f" {p} WG info|information <WG_ID>\n"
+ f" {p} WG up <WG_ID>\n"
+ f" {p} WG down <WG_ID>\n"
+ "\n"
+ f" {p} attach WG <Subu_ID> <WG_ID>\n"
+ f" {p} detach WG <Subu_ID>\n"
+ "\n"
+ f" {p} network up|down <Subu_ID>\n"
+ "\n"
+ f" {p} option set <Subu_ID> <name> <value>\n"
+ f" {p} option get <Subu_ID> <name>\n"
+ f" {p} option list <Subu_ID>\n"
+ "\n"
+ f" {p} exec <Subu_ID> -- <cmd> ...\n"
+ )
+
+ def help(self) -> str:
+ p = self.program_name
+ return (
+ self.usage()
+ + "\n"
+ "Notes:\n"
+ f" * '{p} db load schema' must be run as root; it creates/updates the SQLite schema.\n"
+ f" * '{p} db migrate subu->subu_node' migrates legacy flat rows into the hierarchical table.\n"
+ " * Device commands work on already-mounted mapnames under /mnt (v1). 'scan' discovers\n"
+ " /mnt/<mapname>/user_data and captures all <masu>/subu_data trees into the DB.\n"
+ " * 'subu' commands manage both DB rows and their corresponding Unix users/groups.\n"
+ " You may address a subu by numeric ID (e.g. 'subu_3') or by path tokens:\n"
+ " <masu> <subu> [<subu> ...]\n"
+ " Path tokens must be non-empty and contain no underscore '_'. Proper nouns/acronyms\n"
+ " may be capitalized; hyphens are allowed inside tokens.\n"
+ " * 'subu option incommon' grants or revokes g+rx on the chosen subu home and adjusts\n"
+ " sibling subu group membership under the same <masu> (see policy in infrastructure/unix.py).\n"
+ " * WireGuard, attach/detach, network, option, and exec manage runtime properties of existing subu.\n"
+ "\n"
+ )
+
+ def example(self) -> str:
+ p = self.program_name
+ return (
+ "Example workflow:\n"
+ "\n"
+ f" # 1) Initialize schema (root)\n"
+ f" sudo {p} db load schema\n"
+ "\n"
+ f" # 2) Scan devices already mounted under /mnt (root)\n"
+ f" sudo {p} device scan\n"
+ "\n"
+ f" # 3) Capture a legacy subu present in /home/<masu>/subu_data (root)\n"
+ f" sudo {p} subu capture Thomas developer\n"
+ "\n"
+ f" # 4) List everything (any user)\n"
+ f" {p} subu list\n"
+ "\n"
+ f" # 5) Make a nested subu and then mark a top-level as incommon (root)\n"
+ f" sudo {p} subu make Thomas developer bolt\n"
+ f" sudo {p} subu option set incommon Thomas developer\n"
+ "\n"
+ f" # 6) Query by ID or by path (any user)\n"
+ f" {p} subu info subu_7\n"
+ f" {p} subu info Thomas developer bolt\n"
+ "\n"
+ )
+
+def make_text(program_name: str) -> _Text:
+ return _Text(program_name)
+++ /dev/null
-# dispatch.py
-# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-
-import os, sys
-import env
-from domain import subu as subu_domain
-from domain import device as device_domain
-from infrastructure.db import open_db, ensure_schema
-from infrastructure.options_store import set_option
-
-from infrastructure.unix import (
- ensure_unix_group,
- ensure_unix_user,
- ensure_user_in_group,
- remove_user_from_group,
- user_exists,
-)
-
-
-
-# lo_toggle, WG, attach, network, exec stubs remain below.
-
-
-def _require_root(action: str) -> bool:
- try:
- euid = os.geteuid()
- except AttributeError:
- return True
- if euid != 0:
- print(f"{action}: must be run as root", file=sys.stderr)
- return False
- return True
-
-
-def _db_path() -> str:
- return env.db_path()
-
-
-def _open_existing_db() -> sqlite3.Connection | None:
- path = _db_path()
- if not os.path.exists(path):
- print(
- f"subu: database does not exist at '{path}'.\n"
- f" Run 'db load schema' as root first.",
- file=sys.stderr,
- )
- return None
- try:
- conn = open_db(path)
- except Exception as e:
- print(f"subu: unable to open database '{path}': {e}", file=sys.stderr)
- return None
-
- conn.row_factory = sqlite3.Row
- return conn
-
-
-def db_load_schema() -> int:
- if not _require_root("db load schema"):
- return 1
-
- path = _db_path()
- db_dir = os.path.dirname(path) or "."
-
- try:
- os.makedirs(db_dir, mode=0o750, exist_ok=True)
- except PermissionError as e:
- print(f"subu: cannot create db directory '{db_dir}': {e}", file=sys.stderr)
- return 1
-
- try:
- conn = open_db(path)
- except Exception as e:
- print(f"subu: unable to open database '{path}': {e}", file=sys.stderr)
- return 1
-
- try:
- ensure_schema(conn)
- finally:
- conn.close()
-
- print(f"subu: schema loaded into {path}")
- return 0
-
-
-def device_scan(base_dir: str ="/mnt") -> int:
- """
- Handle:
-
- CLI.py device scan [--base-dir /mnt]
-
- Behavior:
- * Open the subu SQLite database.
- * Scan all directories under base_dir that contain 'user_data'.
- * For each such device:
- - Upsert a row in 'device'.
- - Reconcile all subu under user_data into 'subu', marking
- them as online and associating them with the device.
- - Mark any previously-known subu on that device that are not
- seen in this scan as offline.
-
- This function does NOT perform any cryptsetup, mount, or bindfs work.
- It assumes devices are already mounted at /mnt/<mapname>.
- """
- try:
- conn = open_db()
- except Exception as e:
- print(
- f"subu: cannot open database at '{env.db_path()}': {e}",
- file =sys.stderr,
- )
- return 1
-
- try:
- count = device_domain.scan_and_reconcile(conn, base_dir)
- if count == 0:
- print(f"no user_data devices found under {base_dir}")
- else:
- print(f"scanned {count} device(s) under {base_dir}")
- return 0
- finally:
- conn.close()
-
-
-def _insert_subu_row(conn, owner: str, subu_path: list[str], username: str) -> int | None:
- """Insert a row into subu table and return its id."""
- leaf_name = subu_path[-1]
- full_unix_name = username
- path_str = " ".join([owner] + subu_path)
- netns_name = full_unix_name
-
- from datetime import datetime, timezone
-
- now = datetime.now(timezone.utc).isoformat()
-
- try:
- cur = conn.execute(
- """INSERT INTO subu
- (owner, name, full_unix_name, path, netns_name, wg_id, created_at, updated_at)
- VALUES (?, ?, ?, ?, ?, NULL, ?, ?)""",
- (owner, leaf_name, full_unix_name, path_str, netns_name, now, now),
- )
- conn.commit()
- return cur.lastrowid
- except sqlite3.IntegrityError as e:
- print(
- f"subu: database already has an entry for '{full_unix_name}': {e}",
- file=sys.stderr,
- )
- return None
- except Exception as e:
- print(f"subu: error recording subu in database: {e}", file=sys.stderr)
- return None
-
-
-def _maybe_add_to_incommon(conn, owner: str, new_username: str) -> None:
- """If owner has an incommon subu configured, add new_username to that group."""
- key = f"incommon.{owner}"
- spec = get_option(key, None)
- if not spec:
- return
- if not isinstance(spec, str) or not spec.startswith("subu_"):
- print(
- f"subu: warning: option {key} has unexpected value '{spec}', "
- "expected 'subu_<id>'",
- file=sys.stderr,
- )
- return
- try:
- subu_numeric_id = int(spec.split("_", 1)[1])
- except ValueError:
- print(
- f"subu: warning: option {key} has invalid Subu_ID '{spec}'",
- file=sys.stderr,
- )
- return
-
- row = conn.execute(
- "SELECT full_unix_name FROM subu WHERE id = ? AND owner = ?",
- (subu_numeric_id, owner),
- ).fetchone()
- if row is None:
- print(
- f"subu: warning: option {key} refers to missing subu id {subu_numeric_id}",
- file=sys.stderr,
- )
- return
-
- incommon_unix = row["full_unix_name"]
- ensure_user_in_group(new_username, incommon_unix)
-
-
-def subu_make(path_tokens: list[str]) -> int:
- if not path_tokens or len(path_tokens) < 2:
- print(
- "subu: make requires at least <masu> and one <subu> component",
- file=sys.stderr,
- )
- return 2
-
- if not _require_root("subu make"):
- return 1
-
- masu = path_tokens[0]
- subu_path = path_tokens[1:]
-
- try:
- username = subu_domain.make_subu(masu, subu_path)
- except SystemExit as e:
- print(f"subu: {e}", file=sys.stderr)
- return 2
- except Exception as e:
- print(f"subu: error creating Unix user for {path_tokens}: {e}", file=sys.stderr)
- return 1
-
- conn = _open_existing_db()
- if conn is None:
- return 1
-
- subu_id = _insert_subu_row(conn, masu, subu_path, username)
- if subu_id is None:
- conn.close()
- return 1
-
- # If this owner has an incommon subu, join that group.
- _maybe_add_to_incommon(conn, masu, username)
-
- conn.close()
- print(f"subu_{subu_id}")
- return 0
-
-
-def subu_capture(path_tokens: list[str]) -> int:
- """Handle: subu capture <masu> <subu> [<subu> ...]
-
- Capture an existing Unix user into the database and fix its groups.
- """
- if not path_tokens or len(path_tokens) < 2:
- print(
- "subu: capture requires at least <masu> and one <subu> component",
- file=sys.stderr,
- )
- return 2
-
- if not _require_root("subu capture"):
- return 1
-
- masu = path_tokens[0]
- subu_path = path_tokens[1:]
-
- # Compute expected Unix username.
- try:
- username = subu_domain.subu_username(masu, subu_path)
- except SystemExit as e:
- print(f"subu: {e}", file=sys.stderr)
- return 2
-
- if not user_exists(username):
- print(f"subu: capture: Unix user '{username}' does not exist", file=sys.stderr)
- return 1
-
- # Ensure the primary group exists (legacy systems should already have it).
- ensure_unix_group(username)
-
- # Ensure membership in ancestor groups for traversal.
- ancestor_groups = subu_domain._ancestor_group_names(masu, subu_path)
- for gname in ancestor_groups:
- ensure_user_in_group(username, gname)
-
- conn = _open_existing_db()
- if conn is None:
- return 1
-
- subu_id = _insert_subu_row(conn, masu, subu_path, username)
- if subu_id is None:
- conn.close()
- return 1
-
- # Honor any incommon config for this owner.
- _maybe_add_to_incommon(conn, masu, username)
-
- conn.close()
- print(f"subu_{subu_id}")
- return 0
-
-
-def _resolve_subu(conn: sqlite3.Connection, target: str, rest: list[str]) -> sqlite3.Row | None:
- """Resolve a subu either by ID (subu_7) or by path."""
- if target.startswith("subu_") and not rest:
- try:
- subu_numeric_id = int(target.split("_", 1)[1])
- except ValueError:
- print(f"subu: invalid Subu_ID '{target}'", file=sys.stderr)
- return None
-
- row = conn.execute("SELECT * FROM subu WHERE id = ?", (subu_numeric_id,)).fetchone()
- if row is None:
- print(f"subu: no such subu with id {subu_numeric_id}", file=sys.stderr)
- return row
-
- path_tokens = [target] + list(rest)
- if len(path_tokens) < 2:
- print(
- "subu: path form requires at least <masu> and one <subu> component",
- file=sys.stderr,
- )
- return None
-
- owner = path_tokens[0]
- path_str = " ".join(path_tokens)
-
- row = conn.execute(
- "SELECT * FROM subu WHERE owner = ? AND path = ?",
- (owner, path_str),
- ).fetchone()
-
- if row is None:
- print(f"subu: no such subu with owner='{owner}' and path='{path_str}'", file=sys.stderr)
- return row
-
-
-def subu_list() -> int:
- conn = _open_existing_db()
- if conn is None:
- return 1
-
- cur = conn.execute(
- "SELECT id, owner, path, full_unix_name, netns_name, wg_id FROM subu ORDER BY id"
- )
- rows = cur.fetchall()
- conn.close()
-
- if not rows:
- print("(no subu in database)")
- return 0
-
- for row in rows:
- subu_id = row[0]
- owner = row[1]
- path = row[2]
- full_unix_name = row[3]
- netns_name = row[4]
- wg_id = row[5]
- wg_display = "-" if wg_id is None else f"WG_{wg_id}"
- print(f"subu_{subu_id}\t{owner}\t{path}\t{full_unix_name}\t{netns_name}\t{wg_display}")
-
- return 0
-
-
-def subu_info(target: str, rest: list[str]) -> int:
- conn = _open_existing_db()
- if conn is None:
- return 1
-
- row = _resolve_subu(conn, target, rest)
- if row is None:
- conn.close()
- return 1
-
- subu_id = row["id"]
- owner = row["owner"]
- name = row["name"]
- full_unix_name = row["full_unix_name"]
- path = row["path"]
- netns_name = row["netns_name"]
- wg_id = row["wg_id"]
- created_at = row["created_at"]
- updated_at = row["updated_at"]
-
- conn.close()
-
- print(f"Subu_ID: subu_{subu_id}")
- print(f"Owner: {owner}")
- print(f"Name: {name}")
- print(f"Path: {path}")
- print(f"Unix user: {full_unix_name}")
- print(f"Netns: {netns_name}")
- print(f"WG_ID: {wg_id if wg_id is not None else '-'}")
- print(f"Created: {created_at}")
- print(f"Updated: {updated_at}")
- return 0
-
-
-def subu_remove(target: str, rest: list[str]) -> int:
- if not _require_root("subu remove"):
- return 1
-
- conn = _open_existing_db()
- if conn is None:
- return 1
-
- row = _resolve_subu(conn, target, rest)
- if row is None:
- conn.close()
- return 1
-
- subu_id = row["id"]
- path_str = row["path"]
- path_tokens = path_str.split(" ")
- if len(path_tokens) < 2:
- print(f"subu: stored path is invalid for id {subu_id}: '{path_str}'", file=sys.stderr)
- conn.close()
- return 1
-
- masu = path_tokens[0]
- subu_path = path_tokens[1:]
-
- try:
- username = subu_domain.remove_subu(masu, subu_path)
- except SystemExit as e:
- print(f"subu: {e}", file=sys.stderr)
- conn.close()
- return 2
- except Exception as e:
- print(f"subu: error removing Unix user for id subu_{subu_id}: {e}", file=sys.stderr)
- conn.close()
- return 1
-
- try:
- conn.execute("DELETE FROM subu WHERE id = ?", (subu_id,))
- conn.commit()
- except Exception as e:
- print(f"subu: error removing database row for id subu_{subu_id}: {e}", file=sys.stderr)
- conn.close()
- return 1
-
- conn.close()
- print(f"removed subu_{subu_id} {username}")
- return 0
-
-
-def _subu_home_path(owner: str, path_str: str) -> str:
- """Compute subu home dir from owner and path string."""
- tokens = path_str.split(" ")
- if not tokens or tokens[0] != owner:
- return ""
- subu_tokens = tokens[1:]
- path = os.path.join("/home", owner)
- for t in subu_tokens:
- path = os.path.join(path, "subu_data", t)
- return path
-
-
-def _chmod_incommon(home: str) -> None:
- try:
- st = os.stat(home)
- except FileNotFoundError:
- print(f"subu: warning: incommon home '{home}' does not exist", file=sys.stderr)
- return
-
- mode = st.st_mode
- mode |= (stat.S_IRGRP | stat.S_IXGRP)
- mode &= ~(stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
- os.chmod(home, mode)
-
-
-def _chmod_private(home: str) -> None:
- try:
- st = os.stat(home)
- except FileNotFoundError:
- print(f"subu: warning: home '{home}' does not exist for clear incommon", file=sys.stderr)
- return
-
- mode = st.st_mode
- mode &= ~(stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)
- os.chmod(home, mode)
-
-
-def subu_option_incommon(action: str, target: str, rest: list[str]) -> int:
- """Handle:
-
- subu option set incommon <Subu_ID>|<masu> <subu> [<subu> ...]
- subu option clear incommon <Subu_ID>|<masu> <subu> [<subu> ...]
- """
- if not _require_root(f"subu option {action} incommon"):
- return 1
-
- conn = _open_existing_db()
- if conn is None:
- return 1
-
- row = _resolve_subu(conn, target, rest)
- if row is None:
- conn.close()
- return 1
-
- subu_id = row["id"]
- owner = row["owner"]
- full_unix_name = row["full_unix_name"]
- path_str = row["path"]
-
- key = f"incommon.{owner}"
- spec = f"subu_{subu_id}"
-
- if action == "set":
- # Record mapping.
- set_option(key, spec)
-
- # Make all subu of this owner members of this group.
- cur = conn.execute(
- "SELECT full_unix_name FROM subu WHERE owner = ?",
- (owner,),
- )
- rows = cur.fetchall()
- for r in rows:
- uname = r["full_unix_name"]
- if uname == full_unix_name:
- continue
- ensure_user_in_group(uname, full_unix_name)
-
- # Adjust directory permissions on incommon home.
- home = _subu_home_path(owner, path_str)
- if home:
- _chmod_incommon(home)
-
- conn.close()
- print(f"incommon for {owner} set to subu_{subu_id}")
- return 0
-
- # clear
- current = get_option(key, "")
- if current and current != spec:
- print(
- f"subu: incommon for owner '{owner}' is currently {current}, not {spec}",
- file=sys.stderr,
- )
- conn.close()
- return 1
-
- # Clear mapping.
- set_option(key, "")
-
- # Remove other subu from this group.
- cur = conn.execute(
- "SELECT full_unix_name FROM subu WHERE owner = ?",
- (owner,),
- )
- rows = cur.fetchall()
- for r in rows:
- uname = r["full_unix_name"]
- if uname == full_unix_name:
- continue
- remove_user_from_group(uname, full_unix_name)
-
- home = _subu_home_path(owner, path_str)
- if home:
- _chmod_private(home)
-
- conn.close()
- print(f"incommon for {owner} cleared from subu_{subu_id}")
- return 0
-
-
-# --- existing stubs (unchanged) -------------------------------------------
-
-def wg_global(arg1: str | None) -> int:
- print("WG global: not yet implemented", file=sys.stderr)
- return 1
-
-
-def wg_make(arg1: str | None) -> int:
- print("WG make: not yet implemented", file=sys.stderr)
- return 1
-
-
-def wg_server_public_key(arg1: str | None, arg2: str | None) -> int:
- print("WG server_provided_public_key: not yet implemented", file=sys.stderr)
- return 1
-
-
-def wg_info(arg1: str | None) -> int:
- print("WG info: not yet implemented", file=sys.stderr)
- return 1
-
-
-def wg_up(arg1: str | None) -> int:
- print("WG up: not yet implemented", file=sys.stderr)
- return 1
-
-
-def wg_down(arg1: str | None) -> int:
- print("WG down: not yet implemented", file=sys.stderr)
- return 1
-
-
-def attach_wg(subu_id: str, wg_id: str) -> int:
- print("attach WG: not yet implemented", file=sys.stderr)
- return 1
-
-
-def detach_wg(subu_id: str) -> int:
- print("detach WG: not yet implemented", file=sys.stderr)
- return 1
-
-
-def network_toggle(subu_id: str, state: str) -> int:
- print("network up/down: not yet implemented", file=sys.stderr)
- return 1
-
-
-def lo_toggle(subu_id: str, state: str) -> int:
- print("lo up/down: not yet implemented", file=sys.stderr)
- return 1
-
-
-def exec(subu_id: str, cmd_argv: list[str]) -> int:
- print("exec: not yet implemented", file=sys.stderr)
- return 1
+++ /dev/null
-# domain/device.py
-# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-
-"""
-Device-aware reconciliation of subu state.
-
-This module assumes:
- * Devices with user data are mounted as: /mnt/<mapname>
- * On each device, user data lives under: /mnt/<mapname>/user_data/<masu>
- * Subu home directories follow the pattern:
-
- /mnt/<mapname>/user_data/<masu>/subu_data/<subu0>/subu_data/<subu1>/...
-
- i.e., each subu directory may contain a 'subu_data' directory for children.
-
-Given an open SQLite connection, scan_and_reconcile() will:
-
- * Discover all devices under a base directory (default: /mnt)
- * For each device that has 'user_data':
- - Upsert a row in the 'device' table.
- - Discover all subu paths for all masus on that device.
- - Upsert/refresh rows in 'subu' with device_id + is_online=1.
- - Mark any previously-known subu on that device that are not seen
- in the current scan as is_online=0.
-"""
-
-import os
-from datetime import datetime
-from pathlib import Path
-
-from domain.subu import subu_username
-
-
-def _utc_now() -> str:
- """
- Return a UTC timestamp string suitable for created_at/updated_at/last_seen.
- Example: '2025-11-11T05:30:12Z'
- """
- return datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
-
-
-def _walk_subu_paths(subu_root: Path):
- """
- Yield all subu paths under a root 'subu_data' directory.
-
- Layout assumption:
-
- subu_root/
- S0/
- ...files...
- subu_data/
- S1/
- ...
- subu_data/
- S2/
- ...
-
- For each logical path:
- ['S0'] (top-level)
- ['S0','S1'] (child)
- ['S0','S1','S2'] (grand-child)
- ...
-
- we yield the list of path components.
- """
- stack: list[tuple[Path, list[str]]] = [(subu_root, [])]
-
- while stack:
- current_root, prefix = stack.pop()
- try:
- entries = sorted(current_root.iterdir(), key =lambda p: p.name)
- except FileNotFoundError:
- continue
-
- for entry in entries:
- if not entry.is_dir():
- continue
- name = entry.name
- path_components = prefix + [name]
- yield path_components
-
- child_subu_data = entry / "subu_data"
- if child_subu_data.is_dir():
- stack.append((child_subu_data, path_components))
-
-
-def _upsert_device(
- conn,
- mapname: str,
- mount_point: str,
- kind: str ="external",
-) -> int:
- """
- Ensure a row exists for this device and return its id.
-
- We do NOT try to discover fs_uuid/luks_uuid here; those can be filled
- in later if desired.
- """
- now = _utc_now()
-
- cur = conn.execute(
- "SELECT id FROM device WHERE mapname = ?",
- (mapname,),
- )
- row = cur.fetchone()
-
- if row:
- device_id = row["id"]
- conn.execute(
- """
- UPDATE device
- SET mount_point = ?,
- kind = ?,
- state = 'online',
- last_seen = ?
- WHERE id = ?
- """,
- (mount_point, kind, now, device_id),
- )
- else:
- cur = conn.execute(
- """
- INSERT INTO device (mapname, mount_point, kind, state, last_seen)
- VALUES (?, ?, ?, 'online', ?)
- """,
- (mapname, mount_point, kind, now),
- )
- device_id = cur.lastrowid
-
- return int(device_id)
-
-
-def _ensure_subu_row(
- conn,
- device_id: int,
- owner: str,
- subu_path_components: list[str],
- full_path_str: str,
- now: str,
-):
- """
- Upsert a row in 'subu' for (owner, subu_path_components) on device_id.
-
- full_path_str is the human-readable path, e.g. 'Thomas local' or
- 'Thomas developer bolt'.
- """
- if not subu_path_components:
- return
-
- leaf_name = subu_path_components[-1]
- full_unix_name = subu_username(owner, subu_path_components)
-
- # For now, we simply reuse full_unix_name as netns_name.
- netns_name = full_unix_name
-
- # See if a row already exists for this owner + path.
- cur = conn.execute(
- "SELECT id FROM subu WHERE owner = ? AND path = ?",
- (owner, full_path_str),
- )
- row = cur.fetchone()
-
- if row:
- subu_id = row["id"]
- conn.execute(
- """
- UPDATE subu
- SET device_id = ?,
- is_online = 1,
- updated_at = ?
- WHERE id = ?
- """,
- (device_id, now, subu_id),
- )
- return
-
- # Insert new row
- conn.execute(
- """
- INSERT INTO subu (
- owner,
- name,
- full_unix_name,
- path,
- netns_name,
- wg_id,
- device_id,
- is_online,
- created_at,
- updated_at
- )
- VALUES (?, ?, ?, ?, ?, NULL, ?, 1, ?, ?)
- """,
- (
- owner,
- leaf_name,
- full_unix_name,
- full_path_str,
- netns_name,
- device_id,
- now,
- now,
- ),
- )
-
-
-def _reconcile_device_for_mount(conn, device_id: int, user_data_dir: Path):
- """
- Reconcile all subu on a particular device.
-
- user_data_dir is a path like:
-
- /mnt/Eagle/user_data
-
- Under which we expect:
-
- /mnt/Eagle/user_data/<masu>/subu_data/...
- """
- now = _utc_now()
- discovered: set[tuple[str, str]] = set()
-
- try:
- owners = sorted(user_data_dir.iterdir(), key =lambda p: p.name)
- except FileNotFoundError:
- return
-
- for owner_entry in owners:
- if not owner_entry.is_dir():
- continue
-
- owner = owner_entry.name
- subu_root = owner_entry / "subu_data"
- if not subu_root.is_dir():
- # masu with no subu_data; skip
- continue
-
- for subu_components in _walk_subu_paths(subu_root):
- # Full logical path is: [owner] + subu_components
- path_tokens = [owner] + subu_components
- path_str = " ".join(path_tokens)
- discovered.add((owner, path_str))
-
- _ensure_subu_row(
- conn =conn,
- device_id =device_id,
- owner =owner,
- subu_path_components =subu_components,
- full_path_str =path_str,
- now =now,
- )
-
- # Mark any existing subu on this device that we did NOT see as offline.
- cur = conn.execute(
- "SELECT id, owner, path FROM subu WHERE device_id = ?",
- (device_id,),
- )
- existing = cur.fetchall()
- for row in existing:
- key = (row["owner"], row["path"])
- if key in discovered:
- continue
- conn.execute(
- """
- UPDATE subu
- SET is_online = 0,
- updated_at = ?
- WHERE id = ?
- """,
- (now, row["id"]),
- )
-
-
-def scan_and_reconcile(conn, base_dir: str ="/mnt") -> int:
- """
- Scan all mounted devices under base_dir for 'user_data' trees and
- reconcile them into the database.
-
- For each directory 'base_dir/<mapname>':
-
- * If it contains 'user_data', it is treated as a device.
- * A 'device' row is upserted (mapname = basename).
- * All subu under the corresponding user_data tree are reconciled.
-
- Returns:
- Number of devices that were processed.
- """
- root = Path(base_dir)
- if not root.is_dir():
- return 0
-
- processed = 0
-
- for entry in sorted(root.iterdir(), key =lambda p: p.name):
- if not entry.is_dir():
- continue
-
- mapname = entry.name
- user_data_dir = entry / "user_data"
- if not user_data_dir.is_dir():
- continue
-
- mount_point = str(entry)
- device_id = _upsert_device(conn, mapname, mount_point)
- _reconcile_device_for_mount(conn, device_id, user_data_dir)
- processed += 1
-
- conn.commit()
- return processed
+++ /dev/null
-"""
-4.5 domain/exec.py
-
-Run a command inside a subu’s namespace and UID.
-
-4.5.1 run_in_subu(subu: Subu, cmd_argv: list[str]) -> int
-"""
-def exec_in_subu(subu_id: str, cmd: list):
- sid = int(subu_id.split("_")[1])
- with closing(_db()) as db:
- ns = db.execute("SELECT netns FROM subu WHERE id=?", (sid,)).fetchone()[0]
- os.execvp("ip", ["ip","netns","exec", ns] + cmd)
+++ /dev/null
-"""
-4.3 domain/network.py
-
-Netns + device wiring, including aggregate “network up/down”.
-
-4.3.1 lo_toggle(subu: Subu, state: str) -> None
-4.3.2 attach_wg(subu: Subu, wg: WG) -> None
-4.3.3 detach_wg(subu: Subu) -> None
-4.3.4 network_toggle(subu: Subu, state: str) -> None
-"""
-def network_toggle(subu_id: str, state: str):
- sid = int(subu_id.split("_")[1])
- with closing(_db()) as db:
- ns, wid = db.execute("SELECT netns,wg_id FROM subu WHERE id=?", (sid,)).fetchone()
- # always make sure lo up on 'up'
- if state == "up":
- run(["ip", "netns", "exec", ns, "ip", "link", "set", "lo", "up"], check=False)
- if wid is not None:
- ifname = f"subu_{wid}"
- run(["ip", "-n", ns, "link", "set", "dev", ifname, state], check=False)
- with closing(_db()) as db:
- db.execute("UPDATE subu SET network_state=? WHERE id=?", (state, sid))
- db.commit()
- print(f"{subu_id}: network {state}")
-
-def _make_netns_for_subu(subu_id_num: int, netns_name: str):
- """
- Create the network namespace & bring lo down.
- """
- # ip netns add ns-subu_<id>
- run(["ip", "netns", "add", netns_name])
- # ip netns exec ns-subu_<id> ip link set lo down
- run(["ip", "netns", "exec", netns_name, "ip", "link", "set", "lo", "down"])
+++ /dev/null
-"""
-4.4 domain/options.py
-
-Per-subu options, backed by DB.
-
-4.4.1 set_option(subu_id: str, name: str, value: str) -> None
-4.4.2 get_option(subu_id: str, name: str) -> str | None
-4.4.3 list_options(subu_id: str) -> dict[str, str]
-"""
-def option_set(subu_id: str, name: str, value: str):
- sid = int(subu_id.split("_")[1])
- with closing(_db()) as db:
- db.execute("INSERT INTO options (subu_id,name,value) VALUES(?,?,?) "
- "ON CONFLICT(subu_id,name) DO UPDATE SET value=excluded.value",
- (sid, name, value))
- db.commit()
- print("ok")
-
-def option_get(subu_id: str, name: str):
- sid = int(subu_id.split("_")[1])
- with closing(_db()) as db:
- row = db.execute("SELECT value FROM options WHERE subu_id=? AND name=?", (sid,name)).fetchone()
- print(row[0] if row else "")
-
-def option_list(subu_id: str):
- sid = int(subu_id.split("_")[1])
- with closing(_db()) as db:
- rows = db.execute("SELECT name,value FROM options WHERE subu_id=?", (sid,)).fetchall()
- for n,v in rows:
- print(f"{n}={v}")
-
+++ /dev/null
-# domain/subu.py
-# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-
-from infrastructure.unix import (
- ensure_unix_user,
- ensure_user_in_group,
- remove_unix_user_and_group,
- user_exists,
-)
-from typing import Iterable
-import sqlite3, datetime
-
-def _now(): return datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
-
-def subu_username(owner: str, parts: list[str]) -> str:
- return "_".join([owner] + parts)
-
-def ensure_chain(conn, owner: str, parts: list[str], device_id: int|None, online: bool):
- """
- Ensure that owner/parts[...] exists as a chain; return leaf row (dict).
- """
- conn.row_factory = sqlite3.Row
- parent_id = None
- chain: list[str] = []
- now = _now()
- for seg in parts:
- row = conn.execute(
- "SELECT * FROM subu_node WHERE owner=? AND name=? AND parent_id IS ?",
- (owner, seg, parent_id)
- ).fetchone()
- if row:
- parent_id = row["id"]
- chain.append(seg)
- continue
- chain.append(seg)
- full_path = " ".join([owner] + chain)
- full_unix = subu_username(owner, chain)
- netns = full_unix
- conn.execute(
- """INSERT INTO subu_node(owner,name,parent_id,full_unix_name,full_path,netns_name,
- device_id,is_online,created_at,updated_at)
- VALUES(?,?,?,?,?,?,?, ?,?,?)""",
- (owner, seg, parent_id, full_unix, full_path, netns,
- device_id, 1 if online else 0, now, now)
- )
- parent_id = conn.execute("SELECT last_insert_rowid() id").fetchone()["id"]
- leaf = conn.execute("SELECT * FROM subu_node WHERE id=?", (parent_id,)).fetchone()
- return dict(leaf)
-
-def find_by_path(conn, owner: str, parts: list[str]):
- conn.row_factory = sqlite3.Row
- parent_id = None
- for seg in parts:
- row = conn.execute(
- "SELECT * FROM subu_node WHERE owner=? AND name=? AND parent_id IS ?",
- (owner, seg, parent_id)
- ).fetchone()
- if not row:
- return None
- parent_id = row["id"]
- return dict(row)
-
-def list_children(conn, node_id: int|None, owner: str):
- """
- node_id=None lists top-level subu of owner; otherwise children of node_id.
- """
- conn.row_factory = sqlite3.Row
- if node_id is None:
- cur = conn.execute("SELECT * FROM subu_node WHERE owner=? AND parent_id IS NULL ORDER BY name", (owner,))
- else:
- cur = conn.execute("SELECT * FROM subu_node WHERE owner=? AND parent_id=? ORDER BY name", (owner, node_id))
- return [dict(r) for r in cur.fetchall()]
-
-def _validate_token(label: str, token: str) -> str:
- """
- Validate a single path token (masu or subu).
-
- Rules:
- - must be non-empty after stripping whitespace
- - must not contain underscore '_'
- """
- token_stripped = token.strip()
- if not token_stripped:
- raise SystemExit(f"subu: {label} name must be non-empty")
- if "_" in token_stripped:
- raise SystemExit(
- f"subu: {label} name '{token_stripped}' must not contain underscore '_'"
- )
- # dashes are fine; acronyms and proper nouns are fine.
- return token_stripped
-
-
-def _parent_username(masu: str, path_components: list[str]) -> str | None:
- """
- Return the Unix username of the parent subu, or None if this is top-level.
-
- Examples:
- masu="Thomas", path=["S0"] -> None (parent is just the masu)
- masu="Thomas", path=["S0","S1"] -> "Thomas_S0"
- """
- if len(path_components) <= 1:
- return None
- # parent path is everything except last token
- parent_path = path_components[:-1]
- return subu_username(masu, parent_path)
-
-
-def _ancestor_group_names(masu: str, path_components: list[str]) -> list[str]:
- """
- Compute ancestor groups that a subu must join for directory traversal.
-
- For path:
- [masu, s1, s2, ..., sk]
-
- we return:
- [masu,
- masu_s1,
- masu_s1_s2,
- ...,
- masu_s1_..._s{k-1}]
-
- The last element (full username) is NOT included, because that is
- the subu's own primary group.
- """
- groups: list[str] = []
- # masu group (allows traversal of /home/masu and /home/masu/subu_data)
- groups.append(_validate_token("masu", masu))
-
- # For deeper subu, add each ancestor subu's group
- for depth in range(1, len(path_components)):
- prefix = path_components[:depth]
- groups.append(subu_username(masu, prefix))
-
- return groups
-
-
-def make_subu(masu: str, path_components: list[str]) -> str:
- """
- Make the Unix user and group for this subu.
-
- The subu path is:
- masu subu subu ...
-
- Rules:
- - len(path_components) >= 1
- - tokens must not contain '_'
- - parent must exist:
- * for first-level subu: Unix user 'masu' must exist
- * for deeper subu: parent subu unix user must exist
-
- Returns:
- Unix username, for example 'Thomas_S0' or 'Thomas_S0_S1'.
- """
- if not path_components:
- raise SystemExit("subu: make requires at least one subu component")
-
- # Normalize and validate tokens (this will raise SystemExit on error).
- # subu_username will call _validate_token internally.
- username = subu_username(masu, path_components)
-
- # Enforce parent existence
- parent_uname = _parent_username(masu, path_components)
- if parent_uname is None:
- # Top-level subu: require the masu Unix user to exist
- masu_name = _validate_token("masu", masu)
- if not user_exists(masu_name):
- raise SystemExit(
- f"subu: cannot make '{username}': "
- f"masu Unix user '{masu_name}' does not exist"
- )
- else:
- # Deeper subu: require parent subu Unix user to exist
- if not user_exists(parent_uname):
- raise SystemExit(
- f"subu: cannot make '{username}': "
- f"parent subu unix user '{parent_uname}' does not exist"
- )
-
- # For now, group and user share the same name.
- ensure_unix_user(username, username)
-
- # Add this subu to the ancestor groups so that directory traversal works:
- # /home/masu
- # /home/masu/subu_data
- # /home/masu/subu_data/<parent>/subu_data/...
- ancestor_groups = _ancestor_group_names(masu, path_components)
- for gname in ancestor_groups:
- ensure_user_in_group(username, gname)
-
- return username
-
-def remove_subu(masu: str, path_components: list[str]) -> str:
- """
- Remove the Unix user and group for this subu, if they exist.
-
- The subu path is:
- masu subu subu ...
-
- Returns:
- Unix username that was targeted.
- """
- if not path_components:
- raise SystemExit("subu: remove requires at least one subu component")
-
- username = subu_username(masu, path_components)
- remove_unix_user_and_group(username)
- return username
+++ /dev/null
-"""
-4.2 domain/wg.py
-
-WireGuard objects, independent of subu.
-
-4.2.1 set_global_pool(base_cidr: str) -> None
-4.2.2 make_wg(endpoint: str) -> WG
-4.2.3 set_server_public_key(wg_id: str, key: str) -> None
-4.2.4 get_wg(wg_id: str) -> WG
-4.2.5 bring_up(wg_id: str) -> None
-4.2.6 bring_down(wg_id: str) -> None
-"""
-
-def wg_global(basecidr: str):
- WG_GLOBAL_FILE.write_text(basecidr.strip()+"\n")
- print(f"WG pool base = {basecidr}")
-
-def _alloc_ip(idx: int, base: str) -> str:
- # simplistic /24 allocator: base must be x.y.z.0/24
- prefix = base.split("/")[0].rsplit(".", 1)[0]
- host = 2 + idx
- return f"{prefix}.{host}/32"
-
-def wg_make(endpoint: str) -> str:
- if not WG_GLOBAL_FILE.exists():
- raise RuntimeError("set WG base with `subu WG global <CIDR>` first")
- base = WG_GLOBAL_FILE.read_text().strip()
- with closing(_db()) as db:
- c = db.cursor()
- idx = c.execute("SELECT COUNT(*) FROM wg").fetchone()[0]
- local_ip = _alloc_ip(idx, base)
- c.execute("INSERT INTO wg (endpoint, local_ip, allowed_ips) VALUES (?, ?, ?)",
- (endpoint, local_ip, "0.0.0.0/0"))
- wid = c.lastrowid
- db.commit()
- print(f"WG_{wid} endpoint={endpoint} ip={local_ip}")
- return f"WG_{wid}"
-
-def wg_set_pubkey(wg_id: str, key: str):
- wid = int(wg_id.split("_")[1])
- with closing(_db()) as db:
- db.execute("UPDATE wg SET pubkey=? WHERE id=?", (key, wid))
- db.commit()
- print("ok")
-
-def wg_info(wg_id: str):
- wid = int(wg_id.split("_")[1])
- with closing(_db()) as db:
- row = db.execute("SELECT * FROM wg WHERE id=?", (wid,)).fetchone()
- print(row if row else "not found")
-
-def wg_up(wg_id: str):
- wid = int(wg_id.split("_")[1])
- # Admin-up of WG device handled via network_toggle once attached.
- print(f"{wg_id}: up (noop until attached)")
-
-def wg_down(wg_id: str):
- wid = int(wg_id.split("_")[1])
- print(f"{wg_id}: down (noop until attached)")
-
+++ /dev/null
-# env.py
-# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-
-from pathlib import Path
-
-
-def version() -> str:
- """
- Software / CLI version.
- """
- return "0.3.5"
-
-
-def db_schema_version() -> str:
- """
- Database schema version (used in the DB filename).
-
- Bump this only when the DB layout/semantics change,
- not for every CLI code change.
- """
- return "0.1"
-
-
-def db_root_dir() -> Path:
- """
- Root directory for the manager database.
- """
- return Path("/opt/subu")
-
-
-def db_filename() -> str:
- """
- Filename of the SQLite database, relative to db_root_dir.
- """
- return f"subu_{db_schema_version()}.sqlite3"
-
-
-def db_path() -> str:
- """
- Full path to the SQLite database file.
-
- Currently this is:
-
- /opt/subu/subu_<schema>.sqlite3
-
- There is deliberately no environment override here; this path
- defines the canonical system-wide DB used by all manager invocations.
- """
- return str(db_root_dir() / db_filename())
+++ /dev/null
-"""
-bpf.py
-
-Compile/load the BPF program.
-
-5.3.1 compile_bpf(source_path: str, output_path: str) -> None
-5.3.2 load_bpf(obj_path: str) -> BpfHandle
-"""
-
-def attach_wg(subu_id: str, wg_id: str):
- ensure_mounts()
- sid = int(subu_id.split("_")[1]); wid = int(wg_id.split("_")[1])
- with closing(_db()) as db:
- r = db.execute("SELECT netns FROM subu WHERE id=?", (sid,)).fetchone()
- if not r: raise ValueError("subu not found")
- ns = r[0]
- w = db.execute("SELECT endpoint, local_ip, pubkey FROM wg WHERE id=?", (wid,)).fetchone()
- if not w: raise ValueError("WG not found")
- endpoint, local_ip, pubkey = w
-
- ifname = f"subu_{wid}"
- # make WG link in init ns, move to netns
- run(["ip", "link", "add", ifname, "type", "wireguard"])
- run(["ip", "link", "set", ifname, "netns", ns])
- run(["ip", "-n", ns, "addr", "add", local_ip, "dev", ifname], check=False)
- run(["ip", "-n", ns, "link", "set", "dev", ifname, "mtu", "1420"])
- run(["ip", "-n", ns, "link", "set", "dev", ifname, "down"]) # keep engine down until `network up`
-
- # install steering (MVP: make cgroup + attach bpf program)
- try:
- install_steering(subu_id, ns, ifname)
- print(f"{subu_id}: eBPF steering installed -> {ifname}")
- except BpfError as e:
- print(f"{subu_id}: steering warning: {e}")
-
- with closing(_db()) as db:
- db.execute("UPDATE subu SET wg_id=? WHERE id=?", (wid, sid))
- db.commit()
- print(f"attached {wg_id} to {subu_id} in {ns} as {ifname}")
-
-def detach_wg(subu_id: str):
- ensure_mounts()
- sid = int(subu_id.split("_")[1])
- with closing(_db()) as db:
- r = db.execute("SELECT netns,wg_id FROM subu WHERE id=?", (sid,)).fetchone()
- if not r: print("not found"); return
- ns, wid = r
- if wid is None:
- print("nothing attached"); return
- ifname = f"subu_{wid}"
- run(["ip", "-n", ns, "link", "del", ifname], check=False)
- try:
- remove_steering(subu_id)
- except BpfError as e:
- print(f"steering remove warn: {e}")
- with closing(_db()) as db:
- db.execute("UPDATE subu SET wg_id=NULL WHERE id=?", (sid,))
- db.commit()
- print(f"detached WG_{wid} from {subu_id}")
-
+++ /dev/null
-// -*- mode: c; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 8 -*-
-// bpf_force_egress.c — MVP scaffold to validate UID and prep metadata
-/*
- bpf_force_egress.c
-
-5.5.1 no callable Python API; compiled/used via bpf.py.
-*/
-#include <linux/bpf.h>
-#include <bpf/bpf_helpers.h>
-#include <bpf/bpf_endian.h>
-
-
-char LICENSE[] SEC("license") = "GPL";
-
-struct {
- __uint(type, BPF_MAP_TYPE_HASH);
- __type(key, __u32); // tgid
- __type(value, __u32); // reserved (target ifindex placeholder)
- __uint(max_entries, 1024);
-} subu_tgid2if SEC(".maps");
-
-// Helper: return 0 = allow, <0 reject
-static __always_inline int allow_uid(struct bpf_sock_addr *ctx) {
- // MVP: just accept everyone; you can gate on UID 2017 with bpf_get_current_uid_gid()
- // __u32 uid = (__u32)(bpf_get_current_uid_gid() & 0xffffffff);
- // if (uid != 2017) return -1;
- return 0;
-}
-
-// Hook: cgroup/connect4 — runs before connect(2) proceeds
-SEC("cgroup/connect4")
-int subu_connect4(struct bpf_sock_addr *ctx)
-{
- if (allow_uid(ctx) < 0) return -1;
- // Future: read pinned map/meta, set SO_* via bpf_setsockopt when permitted
- return 0;
-}
-
-// Hook: cgroup/post_bind4 — runs after a local bind is chosen
-SEC("cgroup/post_bind4")
-int subu_post_bind4(struct bpf_sock *sk)
-{
- // Future: enforce bound dev if kernel helper allows; record tgid->ifindex
- __u32 tgid = bpf_get_current_pid_tgid() >> 32;
- __u32 val = 0;
- bpf_map_update_elem(&subu_tgid2if, &tgid, &val, BPF_ANY);
- return 0;
-}
+++ /dev/null
-# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-"""
-bpf_worker.py
-
-Cgroup + BPF orchestration for per-subu steering.
-
-5.4.1 ensure_mounts() -> None
-5.4.2 install_steering(subu: Subu, wg_iface: str) -> None
-5.4.3 remove_steering(subu: Subu) -> None
-5.4.4 class BpfError(Exception)
-"""
-import os, subprocess, json
-from pathlib import Path
-
-class BpfError(RuntimeError): pass
-
-def run(cmd, check=True):
- r = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
- if check and r.returncode != 0:
- raise BpfError(f"cmd failed: {' '.join(cmd)}\n{r.stderr}")
- return r.stdout.strip()
-
-def ensure_mounts():
- # ensure bpf and cgroup v2 are mounted
- try:
- Path("/sys/fs/bpf").mkdir(parents=True, exist_ok=True)
- run(["mount","-t","bpf","bpf","/sys/fs/bpf"], check=False)
- except Exception:
- pass
- try:
- Path("/sys/fs/cgroup").mkdir(parents=True, exist_ok=True)
- run(["mount","-t","cgroup2","none","/sys/fs/cgroup"], check=False)
- except Exception:
- pass
-
-def cgroup_path(subu_id: str) -> str:
- return f"/sys/fs/cgroup/{subu_id}"
-
-def install_steering(subu_id: str, netns: str, ifname: str):
- ensure_mounts()
- cg = Path(cgroup_path(subu_id))
- cg.mkdir(parents=True, exist_ok=True)
-
- # compile BPF
- obj = Path("./bpf_force_egress.o")
- src = Path("./bpf_force_egress.c")
- if not src.exists():
- raise BpfError("bpf_force_egress.c missing next to manager")
-
- # Build object (requires clang/llc/bpftool)
- run(["clang","-O2","-g","-target","bpf","-c",str(src),"-o",str(obj)])
-
- # Load program into bpffs; attach to cgroup/inet4_connect + inet4_post_bind (MVP)
- pinned = f"/sys/fs/bpf/{subu_id}_egress"
- run(["bpftool","prog","loadall",str(obj),pinned], check=True)
-
- # Attach to hooks (MVP validation hooks)
- # NOTE: these are safe no-ops for now; they validate UID and stash ifindex map.
- for hook in ("cgroup/connect4","cgroup/post_bind4"):
- run(["bpftool","cgroup","attach",cgroup_path(subu_id),"attach",hook,"pinned",f"{pinned}/prog_0"], check=False)
-
- # Write metadata for ifname (saved for future prog versions)
- meta = {"ifname": ifname}
- Path(f"/sys/fs/bpf/{subu_id}_meta.json").write_text(json.dumps(meta))
-
-def remove_steering(subu_id: str):
- cg = cgroup_path(subu_id)
- # Detach whatever is attached
- for hook in ("cgroup/connect4","cgroup/post_bind4"):
- subprocess.run(["bpftool","cgroup","detach",cg,"detach",hook], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
- # Remove pinned prog dir
- pinned = Path(f"/sys/fs/bpf/{subu_id}_egress")
- if pinned.exists():
- subprocess.run(["bpftool","prog","detach",str(pinned)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
- try:
- for p in pinned.glob("*"): p.unlink()
- pinned.rmdir()
- except Exception:
- pass
- # Remove cgroup dir
- try:
- Path(cg).rmdir()
- except Exception:
- pass
+++ /dev/null
-# infrastructure/db.py
-# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-
-import sqlite3
-from pathlib import Path
-import env
-
-
-def schema_path_default():
- """
- Path to schema.sql, assumed to live next to this file.
- """
- return Path(__file__).with_name("schema.sql")
-
-
-def open_db(path=None):
- """
- Return a sqlite3.Connection with sensible pragmas.
- Caller is responsible for closing.
-
- If path is None, the canonical manager DB path from env.db_path()
- is used. The parent directory is created if it does not exist.
- """
- if path is None:
- path = env.db_path()
-
- path_obj = Path(path)
- parent = path_obj.parent
-
- try:
- parent.mkdir(parents=True, exist_ok=True)
- except PermissionError as e:
- raise RuntimeError(f"cannot create DB directory '{parent}': {e}") from e
-
- conn = sqlite3.connect(str(path_obj))
- conn.row_factory = sqlite3.Row
- conn.execute("PRAGMA foreign_keys = ON")
- conn.execute("PRAGMA journal_mode = WAL")
- conn.execute("PRAGMA synchronous = NORMAL")
- return conn
-
-
-def ensure_schema(conn):
- """
- Ensure the schema in schema.sql is applied.
- This is idempotent: executing the DDL again is acceptable.
- """
- sql = schema_path_default().read_text(encoding="utf-8")
- conn.executescript(sql)
- conn.commit()
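-
-
-# Example (sketch) of how a caller might use this module; not executed here:
-#
-#   conn = open_db()          # opens env.db_path() and applies the pragmas above
-#   ensure_schema(conn)       # idempotent DDL from schema.sql
-#   try:
-#     n = conn.execute("SELECT count(*) AS n FROM device").fetchone()["n"]
-#     print(f"{n} device(s) known")
-#   finally:
-#     conn.close()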
+++ /dev/null
-# domain/device.py
-# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-
-"""
-Device-aware reconciliation of subu state.
-
-This module assumes:
- * Devices with user data are mounted as: /mnt/<mapname>
- * On each device, user data lives under: /mnt/<mapname>/user_data/<masu>
- * Subu home directories follow the pattern:
-
- /mnt/<mapname>/user_data/<masu>/subu_data/<subu0>/subu_data/<subu1>/...
-
- i.e., each subu directory may contain a 'subu_data' directory for children.
-
-Given an open SQLite connection, scan_and_reconcile() will:
-
- * Discover all devices under a base directory (default: /mnt)
- * For each device that has 'user_data':
- - Upsert a row in the 'device' table.
- - Discover all subu paths for all masus on that device.
- - Upsert/refresh rows in 'subu' with device_id + is_online=1.
- - Mark any previously-known subu on that device that are not seen
- in the current scan as is_online=0.
-"""
-
-import os
-from datetime import datetime
-from pathlib import Path
-
-from domain.subu import subu_username
-
-
-def _utc_now() -> str:
- """
- Return a UTC timestamp string suitable for created_at/updated_at/last_seen.
- Example: '2025-11-11T05:30:12Z'
- """
- return datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
-
-
-def _walk_subu_paths(subu_root: Path):
- """
- Yield all subu paths under a root 'subu_data' directory.
-
- Layout assumption:
-
- subu_root/
- S0/
- ...files...
- subu_data/
- S1/
- ...
- subu_data/
- S2/
- ...
-
- For each logical path:
- ['S0'] (top-level)
- ['S0','S1'] (child)
- ['S0','S1','S2'] (grand-child)
- ...
-
- we yield the list of path components.
- """
- stack: list[tuple[Path, list[str]]] = [(subu_root, [])]
-
- while stack:
- current_root, prefix = stack.pop()
- try:
- entries = sorted(current_root.iterdir(), key =lambda p: p.name)
- except FileNotFoundError:
- continue
-
- for entry in entries:
- if not entry.is_dir():
- continue
- name = entry.name
- path_components = prefix + [name]
- yield path_components
-
- child_subu_data = entry / "subu_data"
- if child_subu_data.is_dir():
- stack.append((child_subu_data, path_components))
-
-
-def _upsert_device(
- conn,
- mapname: str,
- mount_point: str,
- kind: str ="external",
-) -> int:
- """
- Ensure a row exists for this device and return its id.
-
- We do NOT try to discover fs_uuid/luks_uuid here; those can be filled
- in later if desired.
- """
- now = _utc_now()
-
- cur = conn.execute(
- "SELECT id FROM device WHERE mapname = ?",
- (mapname,),
- )
- row = cur.fetchone()
-
- if row:
- device_id = row["id"]
- conn.execute(
- """
- UPDATE device
- SET mount_point = ?,
- kind = ?,
- state = 'online',
- last_seen = ?
- WHERE id = ?
- """,
- (mount_point, kind, now, device_id),
- )
- else:
- cur = conn.execute(
- """
- INSERT INTO device (mapname, mount_point, kind, state, last_seen)
- VALUES (?, ?, ?, 'online', ?)
- """,
- (mapname, mount_point, kind, now),
- )
- device_id = cur.lastrowid
-
- return int(device_id)
-
-
-def _ensure_subu_row(
- conn,
- device_id: int,
- owner: str,
- subu_path_components: list[str],
- full_path_str: str,
- now: str,
-):
- """
- Upsert a row in 'subu' for (owner, subu_path_components) on device_id.
-
- full_path_str is the human-readable path, e.g. 'Thomas local' or
- 'Thomas developer bolt'.
- """
- if not subu_path_components:
- return
-
- leaf_name = subu_path_components[-1]
- full_unix_name = subu_username(owner, subu_path_components)
-
- # For now, we simply reuse full_unix_name as netns_name.
- netns_name = full_unix_name
-
- # See if a row already exists for this owner + path.
- cur = conn.execute(
- "SELECT id FROM subu WHERE owner = ? AND path = ?",
- (owner, full_path_str),
- )
- row = cur.fetchone()
-
- if row:
- subu_id = row["id"]
- conn.execute(
- """
- UPDATE subu
- SET device_id = ?,
- is_online = 1,
- updated_at = ?
- WHERE id = ?
- """,
- (device_id, now, subu_id),
- )
- return
-
- # Insert new row
- conn.execute(
- """
- INSERT INTO subu (
- owner,
- name,
- full_unix_name,
- path,
- netns_name,
- wg_id,
- device_id,
- is_online,
- created_at,
- updated_at
- )
- VALUES (?, ?, ?, ?, ?, NULL, ?, 1, ?, ?)
- """,
- (
- owner,
- leaf_name,
- full_unix_name,
- full_path_str,
- netns_name,
- device_id,
- now,
- now,
- ),
- )
-
-
-def _reconcile_device_for_mount(conn, device_id: int, user_data_dir: Path):
- """
- Reconcile all subu on a particular device.
-
- user_data_dir is a path like:
-
- /mnt/Eagle/user_data
-
- Under which we expect:
-
- /mnt/Eagle/user_data/<masu>/subu_data/...
- """
- now = _utc_now()
- discovered: set[tuple[str, str]] = set()
-
- try:
- owners = sorted(user_data_dir.iterdir(), key =lambda p: p.name)
- except FileNotFoundError:
- return
-
- for owner_entry in owners:
- if not owner_entry.is_dir():
- continue
-
- owner = owner_entry.name
- subu_root = owner_entry / "subu_data"
- if not subu_root.is_dir():
- # masu with no subu_data; skip
- continue
-
- for subu_components in _walk_subu_paths(subu_root):
- # Full logical path is: [owner] + subu_components
- path_tokens = [owner] + subu_components
- path_str = " ".join(path_tokens)
- discovered.add((owner, path_str))
-
- _ensure_subu_row(
- conn =conn,
- device_id =device_id,
- owner =owner,
- subu_path_components =subu_components,
- full_path_str =path_str,
- now =now,
- )
-
- # Mark any existing subu on this device that we did NOT see as offline.
- cur = conn.execute(
- "SELECT id, owner, path FROM subu WHERE device_id = ?",
- (device_id,),
- )
- existing = cur.fetchall()
- for row in existing:
- key = (row["owner"], row["path"])
- if key in discovered:
- continue
- conn.execute(
- """
- UPDATE subu
- SET is_online = 0,
- updated_at = ?
- WHERE id = ?
- """,
- (now, row["id"]),
- )
-
-
-def scan_and_reconcile(conn, base_dir: str ="/mnt") -> int:
- """
- Scan all mounted devices under base_dir for 'user_data' trees and
- reconcile them into the database.
-
- For each directory 'base_dir/<mapname>':
-
- * If it contains 'user_data', it is treated as a device.
- * A 'device' row is upserted (mapname = basename).
- * All subu under the corresponding user_data tree are reconciled.
-
- Returns:
- Number of devices that were processed.
- """
- root = Path(base_dir)
- if not root.is_dir():
- return 0
-
- processed = 0
-
- for entry in sorted(root.iterdir(), key =lambda p: p.name):
- if not entry.is_dir():
- continue
-
- mapname = entry.name
- user_data_dir = entry / "user_data"
- if not user_data_dir.is_dir():
- continue
-
- mount_point = str(entry)
- device_id = _upsert_device(conn, mapname, mount_point)
- _reconcile_device_for_mount(conn, device_id, user_data_dir)
- processed += 1
-
- conn.commit()
- return processed
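-
-
-# Usage sketch (assumes infrastructure.db from this repo and a 'subu' table
-# with the columns referenced above); not invoked here:
-#
-#   from infrastructure.db import open_db
-#   conn = open_db()
-#   n = scan_and_reconcile(conn)   # scans /mnt by default
-#   print(f"{n} device(s) reconciled")
-#   conn.close()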
+++ /dev/null
-# infrastructure/options_store.py
-# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-
-from pathlib import Path
-
-# The options file 'subu.options' is resolved relative to the process working
-# directory; in the manager release tree that puts it next to the CLI, and the
-# development layout matches.
-OPTIONS_FILE = Path("subu.options")
-
-
-def load_options():
- """
- Load options from subu.options into a dictionary.
-
- Lines are of the form: key=value
- Lines starting with '#' or blank lines are ignored.
- """
- opts = {}
- if not OPTIONS_FILE.exists():
- return opts
- text = OPTIONS_FILE.read_text(encoding="utf-8")
- for line in text.splitlines():
- line = line.strip()
- if not line or line.startswith("#"):
- continue
- if "=" not in line:
- continue
- k, v = line.split("=", 1)
- opts[k.strip()] = v.strip()
- return opts
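-
-# Example (sketch) of a subu.options file this parser accepts; the keys are
-# illustrative, not a fixed set:
-#
-#   # manager options
-#   incommon=1
-#   default_base_dir=/mnt
-#
-# load_options() would return {'incommon': '1', 'default_base_dir': '/mnt'}.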
-
-
-def save_options(opts: dict):
- """
- Save a dictionary of options back to subu.options.
- """
- lines = []
- for k in sorted(opts.keys()):
- v = opts[k]
- lines.append(f"{k}={v}\n")
- OPTIONS_FILE.write_text("".join(lines), encoding="utf-8")
-
-
-def set_option(name: str, value: str):
- """
- Set a single option key to a value.
- """
- opts = load_options()
- opts[name] = value
- save_options(opts)
-
-
-def get_option(name: str, default=None):
- """
- Get an option value by name, or default if missing.
- """
- opts = load_options()
- return opts.get(name, default)
+++ /dev/null
--- schema.sql
---
--- Schema for subu manager, including device-aware subu tracking.
-
--- Devices that can hold one or more masu homes.
--- Each row represents a physical (or logical) storage volume
--- identified by a mapname like 'Eagle' and optionally by UUIDs.
-CREATE TABLE device (
- id INTEGER PRIMARY KEY,
- mapname TEXT NOT NULL UNIQUE, -- e.g. 'Eagle'
- fs_uuid TEXT, -- filesystem UUID (optional)
- luks_uuid TEXT, -- LUKS UUID (optional)
- mount_point TEXT NOT NULL, -- e.g. '/mnt/Eagle'
- kind TEXT NOT NULL DEFAULT 'external', -- 'local','external','encrypted',...
- state TEXT NOT NULL DEFAULT 'offline', -- 'online','offline','error'
- last_seen TEXT NOT NULL -- ISO8601 UTC timestamp
-);
-
--- parents via parent_id; one row per node in the tree
-CREATE TABLE subu_node (
- id INTEGER PRIMARY KEY,
- owner TEXT NOT NULL, -- masu
- name TEXT NOT NULL, -- this segment (e.g., developer, bolt)
- parent_id INTEGER, -- NULL for top-level subu under owner
- full_unix_name TEXT NOT NULL UNIQUE, -- e.g., Thomas_developer_bolt
- full_path TEXT NOT NULL, -- e.g., "Thomas developer bolt"
- netns_name TEXT NOT NULL, -- default = full_unix_name
- device_id INTEGER, -- NULL=local
- is_online INTEGER NOT NULL DEFAULT 1,
- created_at TEXT NOT NULL,
- updated_at TEXT NOT NULL,
- FOREIGN KEY(parent_id) REFERENCES subu_node(id),
- FOREIGN KEY(device_id) REFERENCES device(id),
- UNIQUE(owner, name, parent_id) -- no duplicate siblings
-);
-
-CREATE INDEX idx_node_owner_parent ON subu_node(owner, parent_id);
-CREATE INDEX idx_node_device ON subu_node(device_id);
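-
--- Example (sketch): derive each node's full path from the parent links, e.g.
--- to sanity-check the stored full_path column:
---
---   WITH RECURSIVE walk(id, path) AS (
---     SELECT id, owner || ' ' || name FROM subu_node WHERE parent_id IS NULL
---     UNION ALL
---     SELECT n.id, w.path || ' ' || n.name
---     FROM subu_node n JOIN walk w ON n.parent_id = w.id
---   )
---   SELECT id, path FROM walk;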
-
+++ /dev/null
-# infrastructure/unix.py
-# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-
-import subprocess, pwd, grp
-
-
-def run(cmd, check =True):
- """
- Run a Unix command, capturing output.
-
- Raises RuntimeError if check is True and the command fails.
- """
- r = subprocess.run(
- cmd,
- stdout =subprocess.PIPE,
- stderr =subprocess.PIPE,
- text =True,
- )
- if check and r.returncode != 0:
- raise RuntimeError(f"cmd failed: {' '.join(cmd)}\n{r.stderr}")
- return r
-
-
-def group_exists(name: str) -> bool:
- try:
- grp.getgrnam(name)
- return True
- except KeyError:
- return False
-
-
-def user_exists(name: str) -> bool:
- try:
- pwd.getpwnam(name)
- return True
- except KeyError:
- return False
-
-
-def ensure_unix_group(name: str):
- """
- Ensure a Unix group with this name exists.
- """
- if not group_exists(name):
- run(["groupadd", name])
-
-
-def ensure_unix_user(name: str, primary_group: str):
- """
- Ensure a Unix user with this name exists. When the user is created it is
- given 'primary_group' as its primary group; an existing user is left as-is.
-
- The primary group itself is created first if it does not already exist.
- """
- ensure_unix_group(primary_group)
- if not user_exists(name):
- run(["useradd", "-m", "-g", primary_group, "-s", "/bin/bash", name])
-
-
-def ensure_user_in_group(user: str, group: str):
- """
- Ensure 'user' is a member of supplementary group 'group'.
-
- No-op if already present.
- """
- if not user_exists(user):
- raise RuntimeError(f"ensure_user_in_group: user '{user}' does not exist")
- if not group_exists(group):
- raise RuntimeError(f"ensure_user_in_group: group '{group}' does not exist")
-
- g = grp.getgrnam(group)
- if user in g.gr_mem:
- return
-
- run(["usermod", "-a", "-G", group, user])
-
-
-def remove_user_from_group(user: str, group: str):
- """
- Ensure 'user' is NOT a member of supplementary group 'group'.
-
- No-op if user or group is missing, or if user is not a member.
- """
- if not user_exists(user):
- return
- if not group_exists(group):
- return
-
- g = grp.getgrnam(group)
- if user not in g.gr_mem:
- return
-
- # gpasswd -d user group is the standard way on Debian/Ubuntu.
- # We treat failures as non-fatal.
- run(["gpasswd", "-d", user, group], check =False)
-
-
-def remove_unix_user_and_group(name: str):
- """
- Remove a Unix user and group that match this name, if they exist.
-
- The user is removed first, then the group.
- """
- if user_exists(name):
- run(["userdel", name])
- if group_exists(name):
- run(["groupdel", name])
--- /dev/null
+#!/usr/bin/env python3
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+"""
+install/subu_install.py — one-shot installer for subu boot wiring
+
+What it does:
+ 1) Copies usr_local_bin/boot_attach -> /usr/local/bin/subu-boot-attach (0755)
+ 2) Installs systemd units:
+ systemd/boot_attach.service -> /etc/systemd/system/subu-boot-attach.service
+ systemd/subu_resume.service -> /etc/systemd/system/subu-resume.service
+ and enables them (boot + resume).
+ 3) Ensures SQLite schema exists by importing env/db and executing schema.sql.
+ (No CLI parsing; direct function calls.)
+"""
+
+import os, sys, shutil, subprocess
+from pathlib import Path
+
+# --- utils -------------------------------------------------------------------
+
+def must_root():
+ if os.geteuid() != 0:
+ print("error: must run as root (sudo)", file=sys.stderr)
+ sys.exit(1)
+
+def run(*args):
+ return subprocess.run(args, check=False)
+
+def die(msg: str):
+ print(f"error: {msg}", file=sys.stderr); sys.exit(1)
+
+def install_file(src: Path, dst: Path, mode: int):
+ dst.parent.mkdir(parents=True, exist_ok=True)
+ shutil.copy2(src, dst)
+ os.chmod(dst, mode)
+
+def install_text(src: Path, dst: Path, mode: int, substitutions: dict[str,str]|None=None):
+ dst.parent.mkdir(parents=True, exist_ok=True)
+ text = src.read_text(encoding="utf-8")
+ if substitutions:
+ for k,v in substitutions.items():
+ text = text.replace(k, v)
+ dst.write_text(text, encoding="utf-8")
+ os.chmod(dst, mode)
+
+# --- schema loader (direct, no CLI) ------------------------------------------
+
+def ensure_schema(repo_root: Path):
+ # import from the repo directly
+ cli_dir = repo_root / "CLI"
+ infra_dir = cli_dir / "infrastructure"
+ schema_sql = infra_dir / "schema.sql"
+
+ if not schema_sql.is_file():
+ die(f"schema.sql not found at {schema_sql}")
+
+ # make repo modules importable
+ sys.path.insert(0, str(cli_dir))
+
+ try:
+ import env # CLI/env.py
+ from infrastructure import db as dbmod # CLI/infrastructure/db.py
+ except Exception as e:
+ die(f"failed to import env/db from {cli_dir}: {e}")
+
+ db_path = Path(env.db_path())
+ db_dir = db_path.parent
+ db_dir.mkdir(parents=True, exist_ok=True)
+ # lock down dir for root
+ os.chmod(db_dir, 0o700)
+
+ # open and apply schema idempotently
+ conn = dbmod.open_db()
+ try:
+ sql = schema_sql.read_text(encoding="utf-8")
+ conn.executescript(sql)
+ conn.commit()
+ finally:
+ conn.close()
+ print(f"✅ schema ensured at {db_path}")
+
+# --- main --------------------------------------------------------------------
+
+def main():
+ must_root()
+ repo_root = Path(__file__).resolve().parents[1]
+
+ # 1) install helper
+ src_boot = repo_root / "usr_local_bin" / "boot_attach"
+ dst_boot = Path("/usr/local/bin/subu-boot-attach")
+ if not src_boot.is_file():
+ die(f"missing helper: {src_boot}")
+ install_file(src_boot, dst_boot, 0o755)
+ print(f"✅ installed helper -> {dst_boot}")
+
+ # 2) install systemd units (with light substitution if needed)
+ sysd_src_attach = repo_root / "systemd" / "boot_attach.service"
+ sysd_src_resume = repo_root / "systemd" / "subu_resume.service"
+ sysd_dst_attach = Path("/etc/systemd/system/subu-boot-attach.service")
+ sysd_dst_resume = Path("/etc/systemd/system/subu-resume.service")
+
+ if not sysd_src_attach.is_file():
+ die(f"missing unit: {sysd_src_attach}")
+ if not sysd_src_resume.is_file():
+ die(f"missing unit: {sysd_src_resume}")
+
+ # Allow service templates to reference {{BOOT_ATTACH}} if they want
+ subs = {"{{BOOT_ATTACH}}": str(dst_boot)}
+ install_text(sysd_src_attach, sysd_dst_attach, 0o644, substitutions=subs)
+ install_text(sysd_src_resume, sysd_dst_resume, 0o644, substitutions=subs)
+ print(f"✅ installed units -> {sysd_dst_attach}, {sysd_dst_resume}")
+
+ # 3) ensure schema exists (direct call)
+ ensure_schema(repo_root)
+
+ # 4) reload + enable
+ run("systemctl", "daemon-reload")
+ run("systemctl", "enable", "--now", "subu-boot-attach.service")
+ run("systemctl", "enable", "subu-resume.service")
+
+ print("✅ systemd units enabled (boot attach now active)")
+ print("All set.\n"
+ "- On next boot (or now via: systemctl start subu-boot-attach.service),\n"
+ " homes under /mnt/*/user_data/<masu> will be bound to /home/<masu>,\n"
+ " the DB will be reconciled, and subu bindfs mounts reopened.")
+
+if __name__ == "__main__":
+ main()
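+
+# Usage (sketch): run from a checkout of this repo, as root:
+#
+#   sudo python3 install/subu_install.py
+#
+# Re-running is intended to be safe: the file copies overwrite in place, the
+# schema DDL is idempotent, and enabling an already-enabled unit is a no-op.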
--- /dev/null
+[Unit]
+Description=Attach /home/<masu> from mounted devices and reconcile subu DB
+After=local-fs.target network-online.target
+Wants=local-fs.target
+
+[Service]
+Type=oneshot
+ExecStart=/usr/bin/env python3 /usr/local/lib/subu/boot_attach.py
+RemainAfterExit=yes
+
+[Install]
+WantedBy=multi-user.target
--- /dev/null
+[Unit]
+Description=Re-attach subu homes after suspend/resume
+After=suspend.target
+Wants=suspend.target
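+# Ordering After=suspend.target together with WantedBy=suspend.target is the
+# usual "run after resume" pattern: the unit is pulled in when the system
+# suspends but only starts once the suspend cycle finishes, i.e. on wake-up.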
+
+[Service]
+Type=oneshot
+ExecStart=/usr/bin/systemctl start subu-boot-attach.service
+
+[Install]
+WantedBy=suspend.target
+++ /dev/null
-# text.py
-# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
-
-"""
-text.py — user-facing text for the subu manager CLI.
-"""
-
-
-class _Text:
- def __init__(self, program_name: str):
- self.program_name = program_name
- # Keep version string in one place here for now.
- self._version = "0.3.4"
-
- # ---- Public API expected by CLI.py ---------------------------------------
-
- def version(self) -> str:
- """
- Return a short version string suitable for 'PROG version'.
- """
- return f"{self._version}\n"
-
- def usage(self) -> str:
- """
- Return a short usage summary including the command surface.
- """
- p = self.program_name
- v = self._version
- return (
- f"{p} — Subu manager (v{v})\n"
- "\n"
- "Usage:\n"
-
- f" {p} # usage\n"
- f" {p} help # detailed help\n"
- f" {p} example # example workflow\n"
- f" {p} version # print version\n"
- "\n"
-
- f" {p} db load schema\n"
- "\n"
-
- f" {p} subu make <masu> <subu> [<subu> ...]\n"
- f" {p} subu capture <masu> <subu> [<subu> ...]\n"
- f" {p} subu list\n"
- f" {p} subu info subu_<id>\n"
- f" {p} subu info <masu> <subu> [<subu> ...]\n"
- f" {p} subu remove subu_<id>\n"
- f" {p} subu remove <masu> <subu> [<subu> ...]\n"
- f" {p} subu option set incommon subu_<id>\n"
- f" {p} subu option set incommon <masu> <subu> [<subu> ...]\n"
- f" {p} subu option clear incommon subu_<id>\n"
- f" {p} subu option clear incommon <masu> <subu> [<subu> ...]\n"
- "\n"
-
- f" {p} lo up|down <Subu_ID>\n"
- "\n"
-
- f" {p} WG global <BaseCIDR>\n"
- f" {p} WG make <host:port>\n"
- f" {p} WG server_provided_public_key <WG_ID> <Base64Key>\n"
- f" {p} WG info|information <WG_ID>\n"
- f" {p} WG up <WG_ID>\n"
- f" {p} WG down <WG_ID>\n"
- "\n"
-
- f" {p} attach WG <Subu_ID> <WG_ID>\n"
- f" {p} detach WG <Subu_ID>\n"
- "\n"
-
- f" {p} network up|down <Subu_ID>\n"
- "\n"
-
- f" {p} option set <Subu_ID> <name> <value>\n"
- f" {p} option get <Subu_ID> <name>\n"
- f" {p} option list <Subu_ID>\n"
- "\n"
-
- f" {p} exec <Subu_ID> -- <cmd> ...\n"
- )
-
- def help(self) -> str:
- """
- Return a more detailed help text.
-
- For now this is usage plus a short explanatory block.
- """
- p = self.program_name
- return (
- self.usage()
- + "\n"
- "Notes:\n"
- f" * '{p} db load schema' must be run as root and will create/update the\n"
- " manager's SQLite database (schema only).\n"
- " * 'subu' commands manage subu records and their corresponding Unix users.\n"
- " They accept either a numeric Subu_ID (e.g. 'subu_3') or a path\n"
- " (<masu> <subu> [<subu> ...]) where noted.\n"
- " * WireGuard, attach/detach, network, option, and exec commands are\n"
- " reserved for managing networking and runtime behavior of existing subu.\n"
- "\n"
- )
-
- def example(self) -> str:
- """
- Return an example workflow.
- """
- p = self.program_name
- return (
- f"Example workflow:\n"
- "\n"
- f" # 1. As root, create or update the manager database schema\n"
- f" sudo {p} db load schema\n"
- "\n"
- f" # 2. As root, create a developer subu for Thomas\n"
- f" sudo {p} subu make Thomas developer\n"
- "\n"
- f" # 3. As root, create a nested subu 'bolt' under Thomas/developer\n"
- f" sudo {p} subu make Thomas developer bolt\n"
- "\n"
- f" # 4. As any user, list all known subu\n"
- f" {p} subu list\n"
- "\n"
- f" # 5. Show detailed info by path\n"
- f" {p} subu info Thomas developer bolt\n"
- "\n"
- f" # 6. Later, remove the nested subu by ID\n"
- f" sudo {p} subu remove subu_3\n"
- "\n"
- )
-
-
-def make_text(program_name: str) -> _Text:
- """
- Factory used by CLI.py to get a text provider for the given program name.
- """
- return _Text(program_name)
+++ /dev/null
-verbs = [
- "usage",
- "help",
- "example",
- "version",
- "init",
- "make",
- "make",
- "info",
- "information",
- "WG",
- "attach",
- "detach",
- "network",
- "lo",
- "option",
- "exec",
-]
-
-p_make = subparsers.add_parser(
- "make",
- help="Create a Subu with hierarchical name + Unix user/groups + netns",
-)
-p_make.add_argument(
- "path",
- nargs="+",
- help="Full Subu path, e.g. 'Thomas US' or 'Thomas new-subu Rabbit'",
-)
-
-elif args.verb == "make":
- subu_id = core.make_subu(args.path)
- print(subu_id)
--- /dev/null
+#!/usr/bin/env python3
+# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*-
+import os, subprocess, sys
+from pathlib import Path
+
+BASE = Path("/mnt")
+SM = os.environ.get("SUBU_CLI", "/usr/local/bin/sm") # your wrapper; fallback to sm
+MAP_OWN_ALL = "/root/mount/masu__map_own_all.sh" # your existing script
+
+def sh(*args) -> int:
+ return subprocess.run(list(args), check=False).returncode
+
+def mounted(target: Path) -> bool:
+ # findmnt --mountpoint (-M) matches only when 'target' is itself a mount
+ # point; --target (-T) would match the filesystem containing the path and
+ # so would report true for any path under /.
+ try:
+ out = subprocess.check_output(["findmnt", "-M", str(target), "-n"], stderr=subprocess.DEVNULL)
+ return bool(out.strip())
+ except subprocess.CalledProcessError:
+ return False
+
+def ensure_bind_home(mapname: str, masu: str) -> None:
+ src = BASE / mapname / "user_data" / masu
+ dst = Path("/home") / masu
+ dst.mkdir(parents=True, exist_ok=True)
+ if mounted(dst):
+ return
+ # transient automount bind via systemd
+ # (automount improves UX on resume; bind is idempotent)
+ subprocess.run([
+ "systemd-mount",
+ "--quiet",
+ "--no-block",
+ "--type", "none",
+ "--automount",
+ "--options", "bind",
+ str(src), str(dst)
+ ], check=False)
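+
+# Equivalent one-off invocation for a single masu (sketch; 'Eagle' and
+# 'Thomas' are illustrative names):
+#
+#   systemd-mount --type=none --options=bind --automount=yes \
+#       /mnt/Eagle/user_data/Thomas /home/Thomas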
+
+def main():
+ # 1) attach each /mnt/<mapname>/user_data/<masu> as /home/<masu>
+ for mp in sorted(p for p in BASE.iterdir() if p.is_dir()):
+ user_data = mp / "user_data"
+ if not user_data.is_dir():
+ continue
+ for masu_dir in sorted(p for p in user_data.iterdir() if p.is_dir()):
+ ensure_bind_home(mp.name, masu_dir.name)
+
+ # 2) reconcile DB (the installer creates the schema before enabling this
+ # service; sh() ignores a non-zero exit if the CLI or DB is unavailable)
+ sh(SM, "device", "scan")
+
+ # 3) optional: reopen bindfs subu mounts for each mounted <masu>
+ if os.path.exists(MAP_OWN_ALL):
+ for home in sorted(Path("/home").iterdir()):
+ if not home.is_dir():
+ continue
+ # heuristic: only run when this /home/<masu> is a bind from /mnt/*/user_data/<masu>
+ try:
+ src = subprocess.check_output(["findmnt", "-no", "SOURCE", "-T", str(home)], text=True).strip()
+ except subprocess.CalledProcessError:
+ continue
+ if "/user_data/" in src:
+ subprocess.run([MAP_OWN_ALL, home.name], check=False)
+
+if __name__ == "__main__":
+ sys.exit(main())