From: Thomas Walker Lynch Date: Mon, 3 Nov 2025 06:02:05 +0000 (+0000) Subject: . X-Git-Url: https://git.reasoningtechnology.com/style/static/git-logo.png?a=commitdiff_plain;h=2e1e92f365bc784f192838c1849874091bf592ed;p=subu . --- diff --git a/developer/DNS/.gitignore b/developer/DNS/.gitignore new file mode 100644 index 0000000..181003e --- /dev/null +++ b/developer/DNS/.gitignore @@ -0,0 +1,6 @@ + +__pycache__ +stage/ +deprecated/ +scratchpad/ + diff --git a/developer/DNS/10-block-IPv6.nft b/developer/DNS/10-block-IPv6.nft new file mode 100644 index 0000000..2cd6a2b --- /dev/null +++ b/developer/DNS/10-block-IPv6.nft @@ -0,0 +1,27 @@ +# write /etc/nftables.d/10-block-IPv6.nft — drop all IPv6 +def configure(prov, planner, WriteFileMeta): + wfm = WriteFileMeta( + dpath="/etc/nftables.d", + fname="10-block-IPv6.nft", + owner="root", + mode=0o644, + ) + planner.displace(wfm) + planner.copy(wfm, content="""\ +table inet NO-IPV6 { + chain input { + type filter hook input priority -300; policy accept; + meta nfproto ipv6 counter comment "drop all IPv6 inbound" drop; + } + + chain output { + type filter hook output priority -300; policy accept; + meta nfproto ipv6 counter comment "drop all IPv6 outbound" drop; + } + + chain forward { + type filter hook forward priority -300; policy accept; + meta nfproto ipv6 counter comment "drop all IPv6 forward" drop; + } +} +""") diff --git a/developer/DNS/20-SUBU-ports.nft b/developer/DNS/20-SUBU-ports.nft new file mode 100644 index 0000000..6c31446 --- /dev/null +++ b/developer/DNS/20-SUBU-ports.nft @@ -0,0 +1,47 @@ +table inet SUBU-DNS-REDIRECT { + chain output { + type nat hook output priority -100; policy accept; + + # Redirect DNS for the subu UIDs to local Unbound listeners + meta skuid 2017 udp dport 53 redirect to :5301 + meta skuid 2018 udp dport 53 redirect to :5302 + meta skuid 2017 tcp dport 53 redirect to :5301 + meta skuid 2018 tcp dport 53 redirect to :5302 + } +} + +table inet SUBU-PORT-EGRESS { + chain output { + 
type filter hook output priority 0; policy accept; + + # Always allow loopback on egress + oifname "lo" accept + + # No IPv6 for subu (until you reintroduce v6) + meta skuid {2017,2018} meta nfproto ipv6 counter comment "no IPv6 for subu" drop + + ##### x6 (UID 2018) + # Block some exfil channels regardless of iface + meta skuid 2018 tcp dport {25,465,587} counter comment "block SMTP/Submission" drop + meta skuid 2018 udp dport {3478,5349,19302-19309} counter comment "block STUN/TURN" drop + meta skuid 2018 tcp dport 853 counter comment "block DoT (TCP/853)" drop + + # (Optional) allow ICMP echo out via x6 + meta skuid 2018 oifname "x6" ip protocol icmp icmp type echo-request accept + + # Enforce interface binding + meta skuid 2018 oifname "x6" accept + meta skuid 2018 oifname != "x6" counter comment "x6 must use wg x6" drop + + ##### US (UID 2017) + meta skuid 2017 tcp dport {25,465,587} counter drop comment "block SMTP/Submission" + meta skuid 2017 udp dport {3478,5349,19302-19309} counter drop comment "block STUN/TURN" + meta skuid 2017 tcp dport 853 counter drop comment "block DoT (TCP/853)" + + # (Optional) ICMP via US + meta skuid 2017 oifname "US" ip protocol icmp icmp type echo-request accept + + meta skuid 2017 oifname "US" accept + meta skuid 2017 oifname != "US" counter comment "US must use wg US" drop + } +} diff --git a/developer/DNS/Man_In_Grey_input_acceptance.py b/developer/DNS/Man_In_Grey_input_acceptance.py new file mode 100644 index 0000000..2608280 --- /dev/null +++ b/developer/DNS/Man_In_Grey_input_acceptance.py @@ -0,0 +1,26 @@ +# Man_In_Grey acceptance filter (default template) +# Return True to include a config file ,False to skip it. +# You receive a PlanProvenance object named `prov`. 
+# +# Common fields: +# prov.stage_root_dpath : Path +# prov.config_abs_fpath : Path +# prov.config_rel_fpath : Path +# prov.read_dir_dpath : Path +# prov.read_fname : str +# +# 1) Accept everything (default): +# def accept(prov): +# return True +# +# 2) Only a namespace: +# def accept(prov): +# return prov.config_rel_fpath.as_posix().startswith("dns/") +# +# 3) Exclude editor junk: +# def accept(prov): +# r = prov.config_rel_fpath.as_posix() +# return not (r.endswith("~") or r.endswith(".swp")) +# +def accept(prov): + return True diff --git a/developer/DNS/README.org b/developer/DNS/README.org new file mode 100644 index 0000000..615d155 --- /dev/null +++ b/developer/DNS/README.org @@ -0,0 +1,91 @@ +#+TITLE: DNS Bundle (Unbound + Per-subu Redirect) — RT-v2025.09.15.1 +#+AUTHOR: RT Toolkit +#+OPTIONS: toc:2 +#+STARTUP: show2levels + +* Overview +This bundle stages a *per-subu DNS* setup on the client: +- Two Unbound instances (templated via ~unbound@.service~): + - ~unbound@US~ listens on ~127.0.0.1:5301~, resolves *over the US tunnel* (outgoing interface = ~10.0.0.1~). + - ~unbound@x6~ listens on ~127.0.0.1:5302~, resolves *over the x6 tunnel* (outgoing interface = ~10.8.0.2~). +- nftables rules match the subu’s UID and *redirect TCP/UDP port 53* to the corresponding local Unbound port. +- A small deploy helper (~deploy_DNS.py~) installs the staged tree and enables services. + +* Why this design? +- When a subu (containerized user) does DNS, traffic is forced to the tunnel assigned to that subu. +- If a tunnel is down, DNS for that subu fails closed (no silent leak), while your ~local~ subu can still use ISP DNS. +- No changes to per-user resolv.conf are required: subu keep using ~nameserver 127.0.0.1~ (via redirect). 
+ +* Layout +#+begin_example +DNS_bundle/ + README.org + deploy_DNS.py + stage/ + etc/ + nftables.d/ + DNS-redirect.nft + systemd/ + system/ + DNS-redirect.service + unbound@.service + unbound/ + unbound-US.conf + unbound-x6.conf + usr/ + local/ + sbin/ + DNS_status.sh +#+end_example + +* Assumptions / Customize +- Client WG local addresses (from your earlier setup): + - US: ~10.0.0.1/32~ + - x6: ~10.8.0.2/32~ +- Subu UIDs (adjust if different): + - US → UID ~2017~ + - x6 → UID ~2018~ +- If these differ on your box, edit: + - ~stage/etc/unbound/unbound-US.conf~ (~outgoing-interface~) + - ~stage/etc/unbound/unbound-x6.conf~ (~outgoing-interface~) + - ~stage/etc/nftables.d/DNS-redirect.nft~ (the ~meta skuid~ lines) + +* Deploy +1. Review staged files: + #+begin_src sh + tar tzf DNS_bundle.tgz | sed 's/^/ /' + #+end_src +2. Extract and run deploy (root): + #+begin_src sh + tar xzf DNS_bundle.tgz + cd DNS_bundle + sudo ./deploy_DNS.py --instances US x6 + #+end_src +3. Verify: + #+begin_src sh + systemctl status unbound@US unbound@x6 DNS-redirect + sudo nft list table inet NAT-DNS-REDIRECT + #+end_src + +* How it works +- nftables (~DNS-redirect.nft~) in ~inet~ *nat output* hook rewrites subu DNS to the local listener ports: + - US (UID 2017) → ~127.0.0.1:5301~ + - x6 (UID 2018) → ~127.0.0.1:5302~ +- Each Unbound instance binds to its port and *sources queries from the WG IP* using ~outgoing-interface~. +- Unit ordering ties each instance to its tunnel: ~After=~ and ~Requires=~ ~wg-quick@%i.service~. + +* Notes +- If a tunnel’s address is not present at Unbound start, the unit waits because of the dependency and restarts later. +- For DoT/DoH upstream, you can switch to ~forward-tls-upstream: yes~ with providers that support TLS on 853. +- The ~DNS_status.sh~ helper prints a quick status and the top of logs. 
+ +* Rollback +#+begin_src sh +sudo systemctl disable --now unbound@US unbound@x6 DNS-redirect +sudo nft flush table inet NAT-DNS-REDIRECT || true +# Remove staged files if desired (be careful) +# sudo rm -f /etc/unbound/unbound-US.conf /etc/unbound/unbound-x6.conf +#+end_src + +* License +This bundle is provided “as-is”. Use at your own discretion. diff --git a/developer/DNS/doc_howto_install.org b/developer/DNS/doc_howto_install.org new file mode 100644 index 0000000..68476a3 --- /dev/null +++ b/developer/DNS/doc_howto_install.org @@ -0,0 +1,32 @@ + +* 1. modify stage files + + The stage/ directory holds bespoke configuration files for host StanleyPark's configuration. + + Copy/Modify the sraged files for your site. + + Work on the stage is done in user space. The program `sudo install_staged_tree.py` copies the files on the stage into the root file system, `/', or optionally to another specified directory target. However, normally one will run `deploy.py` to do the install and to make the systemctl calls to restart services. + +* 2. edit /etc/nftables.conf + + requires root priv + + Strange, but Debian 12 nftables does not automatically include the scripts in its drop-in directory, so .. + + add this at the bottom of /etc/nftables.conf + + flush ruleset + include "/etc/nftables.d/*.nft" + +* 3. run `deploy.py` + + requires root priv + +* 4. 
check + + requires root priv + + nft list ruleset | sed -n '/SUBU-/,/}/p' + systemctl status nftables + ss -ltnup 'sport = :5301' 'sport = :5302' # your Unbound listeners + diff --git a/developer/DNS/stage-US/10-block-IPv6.nft.py b/developer/DNS/stage-US/10-block-IPv6.nft.py new file mode 100644 index 0000000..2cd6a2b --- /dev/null +++ b/developer/DNS/stage-US/10-block-IPv6.nft.py @@ -0,0 +1,27 @@ +# write /etc/nftables.d/10-block-IPv6.nft — drop all IPv6 +def configure(prov, planner, WriteFileMeta): + wfm = WriteFileMeta( + dpath="/etc/nftables.d", + fname="10-block-IPv6.nft", + owner="root", + mode=0o644, + ) + planner.displace(wfm) + planner.copy(wfm, content="""\ +table inet NO-IPV6 { + chain input { + type filter hook input priority -300; policy accept; + meta nfproto ipv6 counter comment "drop all IPv6 inbound" drop; + } + + chain output { + type filter hook output priority -300; policy accept; + meta nfproto ipv6 counter comment "drop all IPv6 outbound" drop; + } + + chain forward { + type filter hook forward priority -300; policy accept; + meta nfproto ipv6 counter comment "drop all IPv6 forward" drop; + } +} +""") diff --git a/developer/DNS/stage-US/20-SUBU-ports-US.nft.py b/developer/DNS/stage-US/20-SUBU-ports-US.nft.py new file mode 100644 index 0000000..c9c8f5e --- /dev/null +++ b/developer/DNS/stage-US/20-SUBU-ports-US.nft.py @@ -0,0 +1,51 @@ +# write /etc/nftables.d/20-SUBU-ports.nft — DNS redirect + strict egress +def configure(prov, planner, WriteFileMeta): + wfm = WriteFileMeta( + dpath="/etc/nftables.d", + fname="20-SUBU-ports.nft", + owner="root", + mode=0o644, + ) + planner.displace(wfm) + planner.copy(wfm, content="""\ +# DNS per-UID redirect to local Unbound +table inet SUBU-DNS-REDIRECT { + chain output { + type nat hook output priority -100; policy accept; + + # US (uid 2017) -> 127.0.0.1:5301 + meta skuid 2017 udp dport 53 redirect to :5301 + meta skuid 2017 tcp dport 53 redirect to :5301 + # x6 (uid 2018) -> 127.0.0.1:5302 + meta skuid 
2018 udp dport 53 redirect to :5302 + meta skuid 2018 tcp dport 53 redirect to :5302 + } +} + +# Egress policy: subu UIDs must use their WireGuard iface; block exfil channels +table inet SUBU-PORT-EGRESS { + chain output { + type filter hook output priority 0; policy accept; + + # Always allow loopback + oifname "lo" accept; + + # No IPv6 for subu (until you reintroduce v6) + meta skuid {2017,2018} meta nfproto ipv6 counter comment "no IPv6 for subu" drop; + + ##### x6 (UID 2018) + meta skuid 2018 tcp dport {25,465,587} counter comment "block SMTP/Submission" drop; + meta skuid 2018 udp dport {3478,5349,19302-19309} counter comment "block STUN/TURN" drop; + meta skuid 2018 tcp dport 853 counter comment "block DoT (TCP/853)" drop; + meta skuid 2018 oifname "x6" accept; + meta skuid 2018 oifname != "x6" counter comment "x6 must use wg x6" drop; + + ##### US (UID 2017) + meta skuid 2017 tcp dport {25,465,587} counter comment "block SMTP/Submission" drop; + meta skuid 2017 udp dport {3478,5349,19302-19309} counter comment "block STUN/TURN" drop; + meta skuid 2017 tcp dport 853 counter comment "block DoT (TCP/853)" drop; + meta skuid 2017 oifname "US" accept; + meta skuid 2017 oifname != "US" counter comment "US must use wg US" drop; + } +} +""") diff --git a/developer/DNS/stage-US/30-unbound-US.conf.py b/developer/DNS/stage-US/30-unbound-US.conf.py new file mode 100644 index 0000000..697c38e --- /dev/null +++ b/developer/DNS/stage-US/30-unbound-US.conf.py @@ -0,0 +1,42 @@ +# write /etc/unbound/unbound-US.conf — local listener that egresses via US WG +def configure(prov, planner, WriteFileMeta): + wfm = WriteFileMeta( + dpath="/etc/unbound", + fname="unbound-US.conf", + owner="root", + mode=0o644, + ) + planner.displace(wfm) + planner.copy(wfm, content="""\ +server: + verbosity: 1 + username: "unbound" + directory: "/etc/unbound" + chroot: "" + + do-ip6: no + do-udp: yes + do-tcp: yes + prefer-ip6: no + + # Listen only on loopback (US instance) + interface: 
127.0.0.1@5301 + access-control: 127.0.0.0/8 allow + + # Egress via US tunnel address (policy rules ensure it leaves on wg US) + outgoing-interface: 10.0.0.1 + + # Hardening/cache + hide-identity: yes + hide-version: yes + harden-referral-path: yes + harden-dnssec-stripped: yes + qname-minimisation: yes + aggressive-nsec: yes + prefetch: yes + cache-min-ttl: 60 + cache-max-ttl: 86400 + + # DNSSEC trust anchor + auto-trust-anchor-file: "/var/lib/unbound/root.key" +""") diff --git a/developer/DNS/stage-US/50-wg-policy-US.service.py b/developer/DNS/stage-US/50-wg-policy-US.service.py new file mode 100644 index 0000000..dc0b153 --- /dev/null +++ b/developer/DNS/stage-US/50-wg-policy-US.service.py @@ -0,0 +1,18 @@ +# /etc/systemd/system/wg-policy-US.service — run after wg-quick@US to install policy rules +def configure(prov, planner, WriteFileMeta): + content = """[Unit] +Description=Policy routing for Unbound egress (US) +After=wg-quick@US.service +Wants=wg-quick@US.service + +[Service] +Type=oneshot +ExecStart=/usr/local/sbin/wg-policy-US.sh +RemainAfterExit=yes + +[Install] +WantedBy=multi-user.target +""" + wfm = WriteFileMeta(dpath="/etc/systemd/system", fname="wg-policy-US.service", owner="root", mode="0644") + planner.displace(wfm) + planner.copy(wfm, content=content) diff --git a/developer/DNS/stage-US/51-wg-policy-US.sh.py b/developer/DNS/stage-US/51-wg-policy-US.sh.py new file mode 100644 index 0000000..63c2dd7 --- /dev/null +++ b/developer/DNS/stage-US/51-wg-policy-US.sh.py @@ -0,0 +1,19 @@ +# /usr/local/sbin/wg-policy-US.sh — source-policy routing for Unbound's egress +def configure(prov, planner, WriteFileMeta): + # EDIT if your interface/IP differ: + WG_IFACE = "US" + WG_SRC_IP = "10.0.0.1" + TABLE = 100 + + content = f"""#!/usr/bin/env bash +set -euo pipefail +WG_IFACE="{WG_IFACE}" +WG_SRC_IP="{WG_SRC_IP}" +TABLE={TABLE} + +ip rule replace from "$WG_SRC_IP" lookup "$TABLE" priority 10010 +ip route replace default dev "$WG_IFACE" table "$TABLE" +""" + wfm 
= WriteFileMeta(dpath="/usr/local/sbin", fname="wg-policy-US.sh", owner="root", mode="0755") + planner.displace(wfm) + planner.copy(wfm, content=content) diff --git a/developer/DNS/stage_0/30-dnsredir.nft b/developer/DNS/stage_0/30-dnsredir.nft new file mode 100644 index 0000000..8ab5249 --- /dev/null +++ b/developer/DNS/stage_0/30-dnsredir.nft @@ -0,0 +1,14 @@ +# Redirect DNS traffic per-UID to local Unbound instances. +# US (uid 2017) -> 127.0.0.1:5301 +# x6 (uid 2018) -> 127.0.0.1:5302 +table inet nat { + chain output { + type nat hook output priority -100; + # US + meta skuid 2017 udp dport 53 redirect to :5301 + meta skuid 2017 tcp dport 53 redirect to :5301 + # x6 + meta skuid 2018 udp dport 53 redirect to :5302 + meta skuid 2018 tcp dport 53 redirect to :5302 + } +} diff --git a/developer/DNS/stage_0/unbound-US.conf b/developer/DNS/stage_0/unbound-US.conf new file mode 100644 index 0000000..6a799f7 --- /dev/null +++ b/developer/DNS/stage_0/unbound-US.conf @@ -0,0 +1,40 @@ +server: + verbosity: 1 + username: "unbound" + directory: "/etc/unbound" + chroot: "" + + do-ip6: no + do-udp: yes + do-tcp: yes + prefer-ip6: no + + # Listen only on loopback (US instance) + interface: 127.0.0.1@5301 + access-control: 127.0.0.0/8 allow + + # Egress via US tunnel address (policy routing will carry it out the WG table) + outgoing-interface: 10.0.0.1 + + # Sensible hardening/cache + hide-identity: yes + hide-version: yes + harden-referral-path: yes + harden-dnssec-stripped: yes + qname-minimisation: yes + aggressive-nsec: yes + prefetch: yes + cache-min-ttl: 60 + cache-max-ttl: 86400 + + # DNSSEC TA (create with unbound-anchor) + auto-trust-anchor-file: "/var/lib/unbound/root.key" + # Optional root hints (download separately) + # root-hints: "/var/lib/unbound/root.hints" + +# To use forwarding instead of full recursion, uncomment and edit: +# forward-zone: +# name: "." 
+# forward-tls-upstream: no +# forward-addr: 9.9.9.9 +# forward-addr: 1.1.1.1 diff --git a/developer/DNS/stage_0/unbound-x6.conf b/developer/DNS/stage_0/unbound-x6.conf new file mode 100644 index 0000000..c34a068 --- /dev/null +++ b/developer/DNS/stage_0/unbound-x6.conf @@ -0,0 +1,40 @@ +server: + verbosity: 1 + username: "unbound" + directory: "/etc/unbound" + chroot: "" + + do-ip6: no + do-udp: yes + do-tcp: yes + prefer-ip6: no + + # Listen only on loopback (x6 instance) + interface: 127.0.0.1@5302 + access-control: 127.0.0.0/8 allow + + # Egress via x6 tunnel address (policy routing will carry it out the WG table) + outgoing-interface: 10.8.0.2 + + # Sensible hardening/cache + hide-identity: yes + hide-version: yes + harden-referral-path: yes + harden-dnssec-stripped: yes + qname-minimisation: yes + aggressive-nsec: yes + prefetch: yes + cache-min-ttl: 60 + cache-max-ttl: 86400 + + # DNSSEC TA (create with unbound-anchor) + auto-trust-anchor-file: "/var/lib/unbound/root.key" + # Optional root hints (download separately) + # root-hints: "/var/lib/unbound/root.hints" + +# To use forwarding instead of full recursion, uncomment and edit: +# forward-zone: +# name: "." 
+# forward-tls-upstream: no +# forward-addr: 9.9.9.9 +# forward-addr: 1.1.1.1 diff --git a/developer/DNS/stage_0/unbound@.service b/developer/DNS/stage_0/unbound@.service new file mode 100644 index 0000000..4fa31d8 --- /dev/null +++ b/developer/DNS/stage_0/unbound@.service @@ -0,0 +1,20 @@ +[Unit] +Description=Unbound DNS (%i) +Documentation=man:unbound(8) +After=network-online.target wg-quick@%i.service +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/sbin/unbound -d -p -c /etc/unbound/unbound-%i.conf +Restart=on-failure +# Lock down a bit +CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETGID CAP_SETUID +AmbientCapabilities=CAP_NET_BIND_SERVICE +NoNewPrivileges=true +PrivateTmp=true +ProtectSystem=full +ProtectHome=true + +[Install] +WantedBy=multi-user.target diff --git a/developer/DNS/unbound_US.py b/developer/DNS/unbound_US.py new file mode 100644 index 0000000..4c3f8ba --- /dev/null +++ b/developer/DNS/unbound_US.py @@ -0,0 +1,33 @@ +# unbound/unbound_US.py +def configure(prov, planner, WriteFileMeta): + conf = """server: + verbosity: 1 + username: "unbound" + directory: "/etc/unbound" + chroot: "" + + do-ip6: no + do-udp: yes + do-tcp: yes + prefer-ip6: no + + interface: 127.0.0.1@5301 + access-control: 127.0.0.0/8 allow + + outgoing-interface: 10.0.0.1 + + hide-identity: yes + hide-version: yes + harden-referral-path: yes + harden-dnssec-stripped: yes + qname-minimisation: yes + aggressive-nsec: yes + prefetch: yes + cache-min-ttl: 60 + cache-max-ttl: 86400 + + auto-trust-anchor-file: "/var/lib/unbound/root.key" +""" + wfm = WriteFileMeta(dpath="/etc/unbound", fname="unbound-US.conf", + owner="root", mode="0644") + planner.copy(wfm, content=conf) diff --git a/developer/DNS/unbound_at_template.py b/developer/DNS/unbound_at_template.py new file mode 100644 index 0000000..57326d2 --- /dev/null +++ b/developer/DNS/unbound_at_template.py @@ -0,0 +1,25 @@ +# systemd/unbound_at_template.py +def configure(prov, planner, WriteFileMeta): + 
service = """[Unit] +Description=Unbound DNS (%i) +Documentation=man:unbound(8) +After=network-online.target wg-quick@%i.service +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/sbin/unbound -d -p -c /etc/unbound/unbound-%i.conf +Restart=on-failure +CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETGID CAP_SETUID +AmbientCapabilities=CAP_NET_BIND_SERVICE +NoNewPrivileges=true +PrivateTmp=true +ProtectSystem=full +ProtectHome=true + +[Install] +WantedBy=multi-user.target +""" + wfm = WriteFileMeta(dpath="/etc/systemd/system", fname="unbound@.service", + owner="root", mode="0644") + planner.copy(wfm, content=service) diff --git a/developer/DNS/unbound_x6.py b/developer/DNS/unbound_x6.py new file mode 100644 index 0000000..979e05b --- /dev/null +++ b/developer/DNS/unbound_x6.py @@ -0,0 +1,33 @@ +# unbound/unbound_x6.py +def configure(prov, planner, WriteFileMeta): + conf = """server: + verbosity: 1 + username: "unbound" + directory: "/etc/unbound" + chroot: "" + + do-ip6: no + do-udp: yes + do-tcp: yes + prefer-ip6: no + + interface: 127.0.0.1@5302 + access-control: 127.0.0.0/8 allow + + outgoing-interface: 10.8.0.2 + + hide-identity: yes + hide-version: yes + harden-referral-path: yes + harden-dnssec-stripped: yes + qname-minimisation: yes + aggressive-nsec: yes + prefetch: yes + cache-min-ttl: 60 + cache-max-ttl: 86400 + + auto-trust-anchor-file: "/var/lib/unbound/root.key" +""" + wfm = WriteFileMeta(dpath="/etc/unbound", fname="unbound-x6.conf", + owner="root", mode="0644") + planner.copy(wfm, content=conf) diff --git a/developer/Man_In_Grey_input_acceptance.py b/developer/Man_In_Grey_input_acceptance.py deleted file mode 100644 index 2608280..0000000 --- a/developer/Man_In_Grey_input_acceptance.py +++ /dev/null @@ -1,26 +0,0 @@ -# Man_In_Grey acceptance filter (default template) -# Return True to include a config file ,False to skip it. -# You receive a PlanProvenance object named `prov`. 
-# -# Common fields: -# prov.stage_root_dpath : Path -# prov.config_abs_fpath : Path -# prov.config_rel_fpath : Path -# prov.read_dir_dpath : Path -# prov.read_fname : str -# -# 1) Accept everything (default): -# def accept(prov): -# return True -# -# 2) Only a namespace: -# def accept(prov): -# return prov.config_rel_fpath.as_posix().startswith("dns/") -# -# 3) Exclude editor junk: -# def accept(prov): -# r = prov.config_rel_fpath.as_posix() -# return not (r.endswith("~") or r.endswith(".swp")) -# -def accept(prov): - return True diff --git a/developer/cc/Db.lib.c b/developer/cc/Db.lib.c new file mode 100644 index 0000000..44c9b3f --- /dev/null +++ b/developer/cc/Db.lib.c @@ -0,0 +1,200 @@ +#ifndef IFACE +#define Db·IMPLEMENTATION +#define IFACE +#endif + +#ifndef Db·IFACE +#define Db·IFACE + + #include + #include + + // Enum for exit codes + typedef enum { + Db·EXIT_SUCCESS = 0, + Db·EXIT_DB_OPEN_ERROR, + Db·EXIT_SCHEMA_LOAD_ERROR, + Db·EXIT_MEMORY_ALLOCATION_ERROR, + Db·EXIT_STATEMENT_PREPARE_ERROR, + Db·EXIT_STATEMENT_EXECUTE_ERROR + } Db·ExitCode; + + // Interface prototypes + sqlite3* Db·open(const char *db_path ,bool create_if_not_exists); + Db·ExitCode Db·load_schema(sqlite3 *db, const char *schema_path); + Db·ExitCode Db·log_event(sqlite3 *db, int event_id, int user_id); + int Db·query( + sqlite3 *db + ,const char *sql + ,int (*callback)(void * ,int ,char ** ,char **) + ,void *callback_arg + ); + void Db·close(sqlite3 *db); + +#endif // Db·IFACE + +#ifndef Db·IMPLEMENTATION + + #include + #include + #include + #include + #include + #include + #include + + sqlite3* Db·open(const char *db_path ,bool create_if_not_exists){ + sqlite3 *db; + FILE *file_check = fopen(db_path ,"r"); + + if(!file_check && create_if_not_exists){ + file_check = fopen(db_path ,"w"); + if(!file_check){ + fprintf( + stderr, + "Db::open failed to create database file '%s': %s\n", + db_path, + strerror(errno) + ); + return NULL; + } + fclose(file_check); + printf("Db::open created new 
database file '%s'\n", db_path); + }else if(!file_check){ + fprintf(stderr ,"Db::open database file '%s' not found and create flag not set\n" ,db_path); + return NULL; + }else{ + fclose(file_check); + } + + if( sqlite3_open(db_path ,&db) != SQLITE_OK ){ + fprintf( + stderr, + "Db::open failed to open database '%s': %s\n", + db_path, + sqlite3_errmsg(db) + ); + return NULL; + } + + printf("Db::open database '%s' opened successfully\n", db_path); + return db; + } + + // Load schema from a file + Db·ExitCode Db·load_schema(sqlite3 *db ,const char *schema_path){ + FILE *file = fopen(schema_path, "r"); + if(!file){ + fprintf + ( + stderr + ,"Db::load_schema failed to open schema file '%s'\n" + ,schema_path + ); + return Db·EXIT_SCHEMA_LOAD_ERROR; + } + + fseek(file, 0, SEEK_END); + long file_size = ftell(file); + rewind(file); + + char *schema = malloc(file_size + 1); + if(!schema){ + fprintf(stderr, "Db::load_schema memory allocation failed\n"); + fclose(file); + return Db·EXIT_MEMORY_ALLOCATION_ERROR; + } + + fread(schema, 1, file_size, file); + schema[file_size] = '\0'; + fclose(file); + + char *err_msg = NULL; + if( sqlite3_exec(db, schema, NULL, NULL, &err_msg) != SQLITE_OK ){ + fprintf + ( + stderr + ,"Db::load_schema failed to execute schema: %s\n" + ,err_msg + ); + sqlite3_free(err_msg); + free(schema); + return Db·EXIT_STATEMENT_EXECUTE_ERROR; + } + + printf("Db::load_schema schema initialized successfully from '%s'\n", schema_path); + free(schema); + return Db·EXIT_SUCCESS; + } + + // Log an event into the database + Db·ExitCode Db·log_event(sqlite3 *db ,int event_id ,int user_id){ + const char *sql_template = + "INSERT INTO db_event (event_time ,event_id ,user_id) " + "VALUES (CURRENT_TIMESTAMP ,? 
,?);"; + sqlite3_stmt *stmt; + + if( sqlite3_prepare_v2(db ,sql_template ,-1 ,&stmt ,NULL) != SQLITE_OK ){ + fprintf + ( + stderr + ,"Db::log_event failed to prepare statement: %s\n" + ,sqlite3_errmsg(db) + ); + return Db·EXIT_STATEMENT_PREPARE_ERROR; + } + + sqlite3_bind_int(stmt, 1, event_id); + sqlite3_bind_int(stmt, 2, user_id); + + if( sqlite3_step(stmt) != SQLITE_DONE ){ + fprintf + ( + stderr + ,"Db::log_event failed to execute statement: %s\n" + ,sqlite3_errmsg(db) + ); + sqlite3_finalize(stmt); + return Db·EXIT_STATEMENT_EXECUTE_ERROR; + } + + sqlite3_finalize(stmt); + return Db·EXIT_SUCCESS; + } + + // Query Execution Function + int Db·query( + sqlite3 *db + ,const char *sql + ,int (*callback)(void * ,int ,char ** ,char **) + ,void *callback_arg + ){ + char *err_msg = NULL; + int rc = sqlite3_exec(db ,sql ,callback ,callback_arg ,&err_msg); + + if( rc != SQLITE_OK ){ + fprintf + ( + stderr + ,"Db::query SQL error: %s\nQuery: %s\n" + ,err_msg + ,sql + ); + sqlite3_free(err_msg); + return rc; + } + + return SQLITE_OK; + } + + // Close the database + void Db·close(sqlite3 *db){ + if( db ){ + sqlite3_close(db); + printf("Db::close database connection closed\n"); + } + } + +#endif // Db·IMPLEMENTATION + + diff --git a/developer/cc/DbSubu.lib.c b/developer/cc/DbSubu.lib.c new file mode 100644 index 0000000..4274dec --- /dev/null +++ b/developer/cc/DbSubu.lib.c @@ -0,0 +1,157 @@ +#ifndef IFACE +#define DbSubu·IMPLEMENTATION +#define IFACE +#endif + +#ifndef DbSubu·IFACE +#define DbSubu·IFACE + + #include + + typedef struct DbSubu { + sqlite3 *db; + } DbSubu; + + + // db connection + DbSubu* DbSubu·open( const char *db_path ); + void DbSubu·close( DbSubu *db ); + int DbSubu·validate_schema( DbSubu *db ); + + // User Management + int DbSubu·add_user( DbSubu *db ,const char *name ,const char *home_directory ,int shell_id ,int parent_id ,int user_type_id ); + int DbSubu·delete_user( DbSubu *db ,int user_id ); + int DbSubu·get_user( DbSubu *db ,int user_id ,char 
**name ,char **home_directory ,int *shell_id ,int *parent_id ,int *user_type_id ); + + // Sharing Management + int DbSubu·add_share( DbSubu *db ,int user_id ,int other_user_id ,const char *permissions ); + int DbSubu·delete_share( DbSubu *db ,int share_id ); + + // System Resource Management + int DbSubu·grant_resource( DbSubu *db ,int user_id ,int resource_id ,int granted_by ); + int DbSubu·revoke_resource( DbSubu *db ,int user_id ,int resource_id ); + + // Event Logging + int DbSubu·log_event( DbSubu *db ,int event_id ,int user_id ); + +#endif // DbSubu·IFACE + +#ifdef DbSubu·IMPLEMENTATION + + #include + #include + #include + #include "Db.lib.c" + + // Open the database + DbSubu* DbSubu·open( const char *db_path ){ + DbSubu *db = malloc( sizeof(DbSubu) ); + if( !db ){ + fprintf( stderr ,"DbSubu·open:: failed to allocate memory for DbSubu\n" ); + return NULL; + } + db->db = Db·open(db_path ,true); + if( !db->db ){ + free( db ); + return NULL; + } + return db; + } + + // Close the database + void DbSubu·close( DbSubu *db ){ + if( db ){ + Db·close( db->db ); + free( db ); + } + } + + // Validate the schema + int DbSubu·validate_schema( DbSubu *db ){ + // Validation logic for ensuring the schema is correct + return 0; // Placeholder for schema validation implementation + } + + // Add a user + int DbSubu·add_user( DbSubu *db ,const char *name ,const char *home_directory ,int shell_id ,int parent_id ,int user_type_id ){ + char sql[256]; + snprintf + ( + sql + ,sizeof(sql) + ,"INSERT INTO user (name ,home_directory ,shell ,parent_id ,user_type_id) VALUES ('%s' ,'%s' ,%d ,%d ,%d);" + ,name + ,home_directory + ,shell_id + ,parent_id + ,user_type_id + ); + return Db·query( db->db ,sql ,NULL ,NULL ); + } + + // Delete a user + int DbSubu·delete_user( DbSubu *db ,int user_id ){ + char sql[128]; + snprintf( sql ,sizeof(sql) ,"DELETE FROM user WHERE id = %d;" ,user_id ); + return Db·query( db->db ,sql ,NULL ,NULL ); + } + + // Log an event + int DbSubu·log_event( DbSubu *db 
,int event_id ,int user_id ){ + char sql[128]; + snprintf + ( + sql + ,sizeof(sql) + ,"INSERT INTO db_event (event_id ,user_id) VALUES (%d ,%d);" + ,event_id + ,user_id + ); + return Db·query( db->db ,sql ,NULL ,NULL ); + } + + // Add to a list (private function) + static int add_to_list( sqlite3 *db ,const char *list_name ,const char *entry_name ){ + char sql[128]; + snprintf + ( + sql + ,sizeof(sql) + ,"INSERT INTO %s (name) VALUES ('%s');" + ,list_name + ,entry_name + ); + return Db·query( db ,sql ,NULL ,NULL ); + } + + // Get list entries (private function) + static char** get_list( sqlite3 *db ,const char *list_name ,int *count ){ + char sql[128]; + snprintf( sql ,sizeof(sql) ,"SELECT name FROM %s;" ,list_name ); + + struct ListResult { + char **entries; + int count; + } result = { NULL ,0 }; + + int callback( void *arg ,int argc ,char **argv ,char **col_names ){ + (void)argc; (void)col_names; + struct ListResult *res = arg; + res->entries = realloc( res->entries ,(res->count + 1) * sizeof(char *) ); + res->entries[res->count++] = strdup( argv[0] ); + return 0; + } + + if( Db·query( db ,sql ,callback ,&result ) != SQLITE_OK ){ + for( int i = 0; i < result.count; ++i ){ + free( result.entries[i] ); + } + free( result.entries ); + return NULL; + } + + *count = result.count; + return result.entries; + } + +#endif // DbSubu·IMPLEMENTATION diff --git a/developer/cc/Db_close.cli.c b/developer/cc/Db_close.cli.c new file mode 100644 index 0000000..8575260 --- /dev/null +++ b/developer/cc/Db_close.cli.c @@ -0,0 +1,32 @@ +#define IFACE +#include +#include +#include +#include +#include "Db.lib.c" + +int main(int argc ,char *argv[]){ + if( argc < 2 ){ + fprintf(stderr ,"Usage: %s \n" ,argv[0]); + return EXIT_FAILURE; + } + + // Parse the SQLite handle from the command-line argument + uintptr_t handle_as_int; + if( sscanf(argv[1] ,"%lx" ,&handle_as_int) != 1 ){ + fprintf(stderr ,"%s::main failed to parse handle '%s'\n" ,argv[0] ,argv[1]); + return EXIT_FAILURE; + } + + 
sqlite3 *db = (sqlite3 *)handle_as_int; + + // Attempt to close the database + if( db ){ + Db·close(db); + printf("Database handle %p closed successfully.\n" ,db); + return EXIT_SUCCESS; + } else { + fprintf(stderr ,"Invalid or NULL database handle: %p\n" ,db); + return EXIT_FAILURE; + } +} diff --git a/developer/cc/Hello.cli.c b/developer/cc/Hello.cli.c new file mode 100644 index 0000000..2a18583 --- /dev/null +++ b/developer/cc/Hello.cli.c @@ -0,0 +1,67 @@ + +/* + The subu server command line interface. + + Usage: + server [-s ] [-l ] [arguments...] + + Options: + -s Specify the Unix socket file path. Default: ./socket + -l Specify the log file path. Default: ./log.txt +*/ + +#define IFACE +#include +#include +#include +#include +#include "Hello.lib.c" + +// Define defaults +#define DEFAULT_SOCKET_PATH "socket" + +int main( int argc ,char **argv ){ + char *socket_path = DEFAULT_SOCKET_PATH; + int error_flag = 0; + + int opt; + while( (opt = getopt(argc ,argv ,":s:l:")) != -1 ){ + switch( opt ){ + case 's': + socket_path = optarg; + break; + case '?': // Unknown option + fprintf( stderr ,"%s::main unknown option '-%c'\n" ,argv[0] ,optopt ); + error_flag = 1; + break; + case ':': // Missing argument + fprintf( stderr ,"%s::main missing argument for option '-%c'\n" ,argv[0] ,optopt ); + error_flag = 1; + break; + } + } + + if( optind > argc ){ + fprintf( stderr ,"%s::main optind(%d) > argc(%d), which indicates an option parsing bug\n" ,argv[0] ,optind ,argc ); + error_flag = 1; + } + + // Exit on error after processing all options + if( error_flag ){ + fprintf( stderr ,"%s::main usage: %s [-s ] [arguments...]\n" ,argv[0] ,argv[0] ); + return EXIT_FAILURE; + } + + // Rebase argv to prepare for run + if(optind > 0){ + argv[optind - 1] = argv[0]; // Program name at the new base + argc -= (optind - 1); + argv += (optind - 1); + } + + // Log parsed options + printf( "%s::main socket_path='%s'\n" ,argv[0] ,socket_path ); + + // Call the hello function + return 
Hello·run(argc ,argv ,socket_path); +} diff --git a/developer/cc/Hello.lib.c b/developer/cc/Hello.lib.c new file mode 100644 index 0000000..28d0f19 --- /dev/null +++ b/developer/cc/Hello.lib.c @@ -0,0 +1,75 @@ +#ifndef IFACE +#define Hello·IMPLEMENTATION +#define IFACE +#endif + +#ifndef Hello·IFACE +#define Hello·IFACE + + // Necessary interface includes + // .. none + + // Interface prototypes + int Hello·run(int argc ,char** argv ,char *socket_path); + +#endif // Hello·IFACE + +#ifdef Hello·IMPLEMENTATION + + // Implementation-specific includes + #include + #include + #include + #include + #include + #include + #include + #include + + // Constants + #define Hello·SOCKET_PATH "/var/user_data/Thomas-developer/subu/developer/mockup/subu_server_home/subu_server.sock" + #define Hello·LOG_PATH "server_test.log" + #define Hello·BUFFER_SIZE 256 + + int Hello·run(int argc ,char** argv ,char *socket_path){ + (void)argc; // Suppress unused variable warnings + (void)argv; + + int client_fd; + struct sockaddr_un address; + char buffer[Hello·BUFFER_SIZE]; + + client_fd = socket(AF_UNIX ,SOCK_STREAM ,0); + if( client_fd == -1 ){ + perror("Hello·run:: error opening socket"); + return EXIT_FAILURE; + } + + // Configure server socket address + memset(&address ,0 ,sizeof(address)); + address.sun_family = AF_UNIX; + strncpy(address.sun_path ,socket_path ,sizeof(address.sun_path) - 1); + + // Connect to the server + if( connect(client_fd ,(struct sockaddr *)&address ,sizeof(address)) == -1 ){ + perror("Hello·run:: error connecting to server"); + close(client_fd); + return EXIT_FAILURE; + } + + // Send message to the server + char *out_buf = "hello\n"; + if( write(client_fd ,out_buf ,strlen(out_buf)) == -1 ){ + perror("Hello·run:: error writing to server"); + return EXIT_FAILURE; + } + + printf("Hello·run:: sent \"%s\"\n" ,out_buf); + + // Clean up + close(client_fd); + + return EXIT_SUCCESS; + } + +#endif // Hello·IMPLEMENTATION diff --git a/developer/cc/README.org 
// Entry point for the subu server CLI.
// Usage: server [-s socket_path] [-l log_path] [arguments...]
// Parses the options, rebases argv past them, opens the log file, delegates
// to Server·run, reports the resulting condition to both stderr and the log,
// and exits with Server·run's exit code.
int main( int argc ,char **argv ){
  char *socket_path = DEFAULT_SOCKET_PATH;
  char *log_path = DEFAULT_LOG_PATH;
  int error_flag = 0;   // set on any option error; all options are still scanned first

  // Parse command-line options.
  // The leading ':' in the optstring makes getopt return ':' for a missing
  // option argument instead of printing its own diagnostic.
  int opt;
  while( (opt = getopt(argc ,argv ,":s:l:")) != -1 ){
    switch( opt ){
      case 's':
        socket_path = optarg;
        break;
      case 'l':
        log_path = optarg;
        break;
      case '?': // Unknown option
        fprintf( stderr ,"%s::main unknown option '-%c'\n" ,argv[0] ,optopt );
        error_flag = 1;
        break;
      case ':': // Missing argument
        fprintf( stderr ,"%s::main missing argument for option '-%c'\n" ,argv[0] ,optopt );
        error_flag = 1;
        break;
    }
  }

  // getopt should never leave optind past argc; if it does, that is an
  // internal option-parsing bug, not a user error.
  if( optind > argc ){
    fprintf( stderr ,"%s::main optind(%d) > argc(%d), which indicates an option parsing bug\n" ,argv[0] ,optind ,argc );
    error_flag = 1;
  }

  // Exit on error after processing all options
  if( error_flag ){
    fprintf( stderr ,"%s::main usage: %s [-s ] [-l ] [arguments...]\n" ,argv[0] ,argv[0] );
    return EXIT_FAILURE;
  }

  // Rebase argv to prepare for run.
  // After this block argv[0] is the program name and argv[1..] are the
  // non-option arguments, so Server·run sees a conventional argc/argv.
  if(optind > 0){
    argv[optind - 1] = argv[0]; // Program name at the new base
    argc -= (optind - 1);
    argv += (optind - 1);
  }

  // Open the log file
  FILE *log_file = Server·open_log(log_path);
  if( !log_file ){
    fprintf( stderr ,"%s::main unable to open log file '%s'\n" ,argv[0] ,log_path );
    return Server·EXIT_LOG_FILE_ERROR;
  }

  // Log parsed options
  fprintf( log_file ,"%s::main socket_path='%s'\n" ,argv[0] ,socket_path );
  fprintf( log_file ,"%s::main log_path='%s'\n" ,argv[0] ,log_path );
  fflush(log_file);

  // Prepare file descriptors for error reporting; Server·report expects a
  // -1 terminated list.
  int fds[] = { fileno(stderr), fileno(log_file), -1 };

  // Call the core server function (blocks until the accept loop fails)
  int exit_code = Server·run(argc ,argv ,fds ,socket_path);

  // Report return condition
  Server·return_condition_report(exit_code ,fds);

  // Clean up
  fclose(log_file);

  return exit_code;
}
+ // Internal function prototypes + static void parse( int *fds ,struct ucred *client_cred ,char *input_line ); + static void hello( int *fds ,int argc ,char *argv[] ,struct ucred *client_cred ); + + // Log a message with time and to multiple destinations + void Server·report( int *fds ,const char *message ){ + time_t now = time(NULL); + char time_buffer[32]; + strftime(time_buffer ,sizeof(time_buffer) ,"%Y-%m-%dT%H:%M:%SZ" ,gmtime(&now)); + + for( int i = 0; fds[i] != -1; ++i ){ + dprintf( fds[i] ,"\n%s:: %s" ,time_buffer ,message ); + } + } + + int Server·run( int argc ,char **argv ,int *fds ,char *socket_path ){ + (void)argc; // Suppress unused variable warnings + (void)argv; + + int server_fd ,client_fd; + struct sockaddr_un address; + + // Create socket + if( (server_fd = socket(AF_UNIX ,SOCK_STREAM ,0)) == -1 ){ + Server·report(fds ,"Socket creation failed."); + return Server·EXIT_SOCKET_CREATION_ERROR; + } + + // Configure socket address + memset(&address ,0 ,sizeof(address)); + address.sun_family = AF_UNIX; + strncpy(address.sun_path ,socket_path ,sizeof(address.sun_path) - 1); + + unlink(socket_path); + if( bind(server_fd ,(struct sockaddr *)&address ,sizeof(address)) == -1 ){ + Server·report(fds ,"Binding socket failed."); + close(server_fd); + return Server·EXIT_BIND_ERROR; + } + + if( listen(server_fd ,5) == -1 ){ + Server·report(fds ,"Listening on socket failed."); + close(server_fd); + return Server·EXIT_LISTEN_ERROR; + } + + char startup_message[Server·BUFFER_SIZE]; + snprintf(startup_message ,Server·BUFFER_SIZE ,"Server running with socket '%s' ,awaiting connections..." 
,socket_path); + Server·report(fds ,startup_message); + + while( (client_fd = accept(server_fd ,NULL ,NULL)) != -1 ){ + struct ucred client_cred; + socklen_t len = sizeof(client_cred); + + if( getsockopt(client_fd ,SOL_SOCKET ,SO_PEERCRED ,&client_cred ,&len) == -1 ){ + Server·report(fds ,"Failed to retrieve client credentials."); + close(client_fd); + continue; + } + + char connection_message[Server·BUFFER_SIZE]; + snprintf(connection_message ,Server·BUFFER_SIZE , + "Connection from PID=%d ,UID=%d ,GID=%d" , + client_cred.pid ,client_cred.uid ,client_cred.gid); + Server·report(fds ,connection_message); + + char buffer[Server·BUFFER_SIZE]; + memset(buffer ,0 ,Server·BUFFER_SIZE); + ssize_t bytes_read = read(client_fd ,buffer ,Server·BUFFER_SIZE - 1); + if(bytes_read > 0){ + char *line = strtok(buffer ,"\n"); + while(line != NULL){ + parse(fds ,&client_cred ,line); + line = strtok(NULL ,"\n"); + } + } else if(bytes_read == -1){ + Server·report(fds ,"Error reading from client."); + } + + close(client_fd); + } + + Server·report(fds ,"Error accepting connection."); + close(server_fd); + unlink(socket_path); + return Server·EXIT_ACCEPT_ERROR; + } + + // Parse a single input line and dispatch to the appropriate command + static void parse( int *fds ,struct ucred *client_cred ,char *input_line ){ + char *argv[MAX_ARGC + 1] = {0}; + int argc = 0; + + char *line_copy = strdup(input_line); + if(!line_copy){ + Server·report(fds ,"Failed to duplicate input line."); + return; + } + + char *token = strtok(line_copy ," "); + while(token != NULL && argc < MAX_ARGC){ + argv[argc++] = token; + token = strtok(NULL ," "); + } + + if(argc > 0){ + if( strcmp(argv[0] ,"hello") == 0 ){ + hello(fds ,argc ,argv ,client_cred); + }else{ + char unknown_command_message[Server·BUFFER_SIZE]; + snprintf(unknown_command_message ,Server·BUFFER_SIZE ,"Unknown command '%s'" ,argv[0]); + Server·report(fds ,unknown_command_message); + } + } + + free(line_copy); + } + + // Example command: hello + static 
void hello( int *fds ,int argc ,char *argv[] ,struct ucred *client_cred ){ + char hello_message[Server·BUFFER_SIZE]; + snprintf(hello_message ,Server·BUFFER_SIZE , + "hello:: invoked by PID=%d ,UID=%d ,GID=%d" , + client_cred->pid ,client_cred->uid ,client_cred->gid); + Server·report(fds ,hello_message); + + for( int i = 1; i < argc; ++i ){ + char argument_message[Server·BUFFER_SIZE]; + snprintf(argument_message ,Server·BUFFER_SIZE ," Arg %d: %s" ,i ,argv[i]); + Server·report(fds ,argument_message); + } + } + + // Error reporting function + void Server·return_condition_report( Server·ExitCode code ,int *fds ){ + const char *message; + switch( code ){ + case Server·EXIT_SUCCESS: + message = "Operation completed successfully."; + break; + case Server·EXIT_LOG_FILE_ERROR: + message = "Failed to open log file."; + break; + case Server·EXIT_SOCKET_CREATION_ERROR: + message = "Socket creation failed."; + break; + case Server·EXIT_BIND_ERROR: + message = "Binding socket failed."; + break; + case Server·EXIT_LISTEN_ERROR: + message = "Listening on socket failed."; + break; + case Server·EXIT_ACCEPT_ERROR: + message = "Error accepting connection."; + break; + default: + message = "Unknown error occurred."; + break; + } + + Server·report(fds ,message); + } + + // Log file opener + FILE* Server·open_log( const char *log_path ){ + FILE *log_file = fopen(log_path ,"a+"); + if( log_file ){ + Server·report( (int[]){fileno(log_file), -1} ,"Log file opened."); + } + return log_file; + } + +#endif // Server·IMPLEMENTATION diff --git a/developer/cc/db_add_user.cli.c b/developer/cc/db_add_user.cli.c new file mode 100644 index 0000000..9ae9874 --- /dev/null +++ b/developer/cc/db_add_user.cli.c @@ -0,0 +1,36 @@ +#define IFACE +#include +#include +#include +#include "DbSubu.lib.c" + +int main(int argc ,char *argv[]){ + if( argc < 7 ){ + fprintf(stderr, "Usage: %s \n", argv[0]); + return 1; + } + + const char *db_path = argv[1]; + const char *name = argv[2]; + const char *home_directory = 
// NOTE(review): this file is db_log_event.cli.c, but the body is a verbatim
// copy of db_delete_user.cli.c — it parses a user id and calls
// DbSubu·delete_user, printing "User deleted ...".  As committed, running
// this CLI DELETES the user instead of logging an event.  It should take an
// event id and call the DbSubu event-logging routine instead; confirm that
// routine's exact name and signature in DbSubu.lib.c before changing the
// call (the logging helper visible elsewhere in this commit takes
// (event_id ,user_id)).
int main(int argc ,char *argv[]){
  if( argc < 3 ){
    fprintf(stderr, "Usage: %s \n", argv[0]);
    return 1;
  }

  const char *db_path = argv[1];
  int user_id = atoi(argv[2]);   // no validation; atoi yields 0 on garbage

  DbSubu *db = DbSubu·open(db_path);
  if( !db ){
    fprintf(stderr, "Failed to open database: %s\n", db_path);
    return 1;
  }

  int result = DbSubu·delete_user(db, user_id);  // NOTE(review): wrong call for an event logger
  DbSubu·close(db);

  if( result == 0 ){
    printf("User deleted successfully.\n");
    return 0;
  } else {
    fprintf(stderr, "Failed to delete user.\n");
    return 1;
  }
}
-- Schema for the subu server
--
-- NOTE: SQLite only enforces the REFERENCES clauses below when the
-- connection runs `PRAGMA foreign_keys = ON;` — verify the open routine
-- does so.  Forward references (e.g. db_event referencing user before it
-- is created) are legal: SQLite resolves FK targets at DML time.

-- List Tables
-- SQLite does not support PSQL style types
--
-- CREATE TYPE List AS (
-- id SERIAL, -- Integer ID
-- name TEXT NOT NULL -- Name of the list entry
-- );
--
-- so though these all have the same `List` form, they are declared independently
--
  -- enumerates the property names usable in db_property
  CREATE TABLE db_property_list (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL UNIQUE
  );

  -- enumerates the event kinds usable in db_event
  CREATE TABLE db_event_list (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL UNIQUE
  );

  -- enumerates login shells assignable to a user
  CREATE TABLE shell_list (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL UNIQUE
  );

  -- enumerates grantable system resources
  CREATE TABLE system_resource_list (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL UNIQUE
  );

  -- enumerates user kinds (e.g. master vs sub user)
  CREATE TABLE user_type_list (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL UNIQUE
  );

-- Data Tables
--
  -- one typed value per named database property
  CREATE TABLE db_property (
    id INTEGER PRIMARY KEY,
    property_id INTEGER NOT NULL REFERENCES db_property_list(id),
    type TEXT NOT NULL,
    value TEXT,
    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
  );

  -- audit log: which event happened, when, and (optionally) to which user
  CREATE TABLE db_event (
    id INTEGER PRIMARY KEY,
    event_time DATETIME DEFAULT CURRENT_TIMESTAMP,
    event_id INTEGER NOT NULL REFERENCES db_event_list(id),
    user_id INTEGER REFERENCES user(id)
  );

  -- a managed account; parent_id links a sub user to its master user
  CREATE TABLE user (
    id INTEGER PRIMARY KEY,
    login_gid INTEGER NOT NULL UNIQUE,
    name TEXT NOT NULL UNIQUE,
    home_directory TEXT NOT NULL,
    shell INTEGER NOT NULL REFERENCES shell_list(id),
    parent_id INTEGER REFERENCES user(id),
    user_type_id INTEGER NOT NULL REFERENCES user_type_list(id),
    status TEXT DEFAULT 'active'
  );

  -- directed sharing grant from user_id to other_user_id
  CREATE TABLE share (
    id INTEGER PRIMARY KEY,
    user_id INTEGER NOT NULL REFERENCES user(id),
    other_user_id INTEGER NOT NULL REFERENCES user(id),
    permissions TEXT NOT NULL
  );

  -- a system resource held by a user, recording who granted it
  CREATE TABLE system_resource (
    id INTEGER PRIMARY KEY,
    user_id INTEGER NOT NULL REFERENCES user(id),
    resource_id INTEGER NOT NULL REFERENCES system_resource_list(id),
    granted_by INTEGER REFERENCES user(id)
  );
fopen(Server·LOG_PATH ,"a+"); + if( log_file == NULL ){ + perror("Server·run:: error opening log file"); + return EXIT_FAILURE; + } + + // Create the socket + if( (server_fd = socket(AF_UNIX ,SOCK_STREAM ,0)) == -1 ){ + perror("Server·run:: error creating socket"); + fclose(log_file); + return EXIT_FAILURE; + } + + // Configure socket address + memset(&address ,0 ,sizeof(address)); + address.sun_family = AF_UNIX; + strncpy(address.sun_path ,Server·SOCKET_PATH ,sizeof(address.sun_path) - 1); + + // Bind the socket + unlink(Server·SOCKET_PATH); // Remove existing file if present + if( bind(server_fd ,(struct sockaddr *)&address ,sizeof(address)) == -1 ){ + perror("Server·run:: error binding socket"); + fclose(log_file); + close(server_fd); + return EXIT_FAILURE; + } + + // Listen for connections + if( listen(server_fd ,5) == -1 ){ + perror("Server·run:: error listening on socket"); + fclose(log_file); + close(server_fd); + return EXIT_FAILURE; + } + + printf("Server·run:: server running, waiting for connections...\n"); + + // Accept and handle client connections + while( (client_fd = accept(server_fd ,NULL ,NULL)) != -1 ){ + ssize_t bytes_read; + printf("Server·run:: connection made!\n"); + + memset(buffer ,0 ,Server·BUFFER_SIZE); + bytes_read = read(client_fd ,buffer ,Server·BUFFER_SIZE - 1); + if( bytes_read > 0 ){ + printf("Server·run:: connection said: %s\n" ,buffer); + fprintf(log_file ,"Received: %s\n" ,buffer); + fflush(log_file); + } else if( bytes_read == -1 ){ + perror("Server·run:: error reading from client"); + } + + close(client_fd); + } + + // Clean up + perror("Server·run:: error accepting connection"); + fclose(log_file); + close(server_fd); + unlink(Server·SOCKET_PATH); + + return EXIT_FAILURE; + } + +#endif // Server·IMPLEMENTATION diff --git a/developer/device_management/bestow_audio.sh b/developer/device_management/bestow_audio.sh new file mode 100755 index 0000000..20545e8 --- /dev/null +++ b/developer/device_management/bestow_audio.sh @@ -0,0 
#!/bin/bash
# give_audio.sh — run as master user "Thomas"
# Hands the (single) audio card to exactly one account: stops the
# PipeWire/WirePlumber user services for the master and every subuser,
# then starts them only for the requested target.
# Usage: ./give_audio.sh TARGET_USER
# Example: ./give_audio.sh Thomas-US # give card to subuser
# ./give_audio.sh Thomas # reclaim for master

set -euo pipefail

target="${1-}"
if [[ -z "$target" ]]; then
  echo "❌ usage: $0 "; exit 2
fi

master="Thomas"

# don't use sudo -v as it dumps the password into the emacs shell
sudo echo >& /dev/null

# NOTE(review): run() eval's its argument string — only pass trusted,
# script-built commands through it.  $target is validated against the
# passwd database below before it ever reaches run().
run() { echo "+ $*"; eval "$*"; }

# --- sanity checks ---
if ! id "$target" &>/dev/null; then
  echo "❌ user not found: $target"; exit 1
fi
if [[ "$(id -un)" != "$master" ]]; then
  echo "❌ must be run as master user '$master'"; exit 1
fi

# Gather all subusers (Thomas-*)
# NOTE(review): with no matches this is an empty array; under `set -u`,
# "${subusers[@]}" on an empty array errors on bash < 4.4 — confirm the
# deployed bash version.
mapfile -t subusers < <(getent passwd | awk -F: '$1 ~ /^'"$master"'-/ {print $1}' | sort)

# Stop the master's audio stack (best-effort: || true tolerates not-running)
stop_master_audio() {
  run "systemctl --user stop pipewire pipewire-pulse wireplumber || true"
}

start_master_audio() {
  # start services (not only sockets) to avoid lazy-activation races
  run "systemctl --user start pipewire.service pipewire-pulse.service wireplumber.service"
}

# Stop a subuser's audio stack inside its own user session via machinectl
stop_subu_audio() {
  local u="$1"
  run "sudo machinectl shell ${u}@ /bin/bash -lc 'systemctl --user stop pipewire pipewire-pulse wireplumber || true'"
}

start_subu_audio() {
  local u="$1"
  # Keep subuser from trying to bind to logind (not the active seat)
  run "sudo machinectl shell ${u}@ /bin/bash -lc 'export WIREPLUMBER_DISABLE_PLUGINS=logind; systemctl --user import-environment WIREPLUMBER_DISABLE_PLUGINS; systemctl --user start pipewire.service pipewire-pulse.service wireplumber.service'"
}

# --- stop everyone first (to release ALSA cleanly) ---
stop_master_audio
for u in "${subusers[@]}"; do
  stop_subu_audio "$u"
done

# Small settle time so ALSA reservation clears
sleep 0.5

# --- start only the target ---
if [[ "$target" == "$master" ]]; then
  start_master_audio
else
  # ensure linger for target so user services can run
  run "sudo loginctl enable-linger '$target' || true"
  start_subu_audio "$target"
fi

# --- quick verification (best-effort) ---
if [[ "$target" == "$master" ]]; then
  # Show default sink name (may require pipewire-pulse to be fully up)
  run "pactl info | sed -n 's/^Default Sink: /Default Sink: /p'"
else
  run "sudo machinectl shell ${target}@ /bin/bash -lc 'pactl info | sed -n \\\"s/^Default Sink: /Default Sink: /p\\\"'"
fi
+subu_Xauthority_path="" +use_xauth=0 +use_xhost=0 +if [[ -n "${WAYLAND_DISPLAY:-}" ]]; then + has_display=true + XDG_SESSION_TYPE="wayland" + subu_Xauthority_path="$HOME/subu/$subu/.Xauthority" + use_xauth=1 + use_xhost=0 + echo "🌀 Wayland session - Using xauth for access control" + +elif [[ -n "${DISPLAY:-}" ]]; then + has_display=true + XDG_SESSION_TYPE="x11" + use_xauth=0 + use_xhost=1 + echo "🧱 X11 session - Using xhost for access control" + +else + has_display=false + XDG_SESSION_TYPE="tty" + use_xauth=0 + use_xhost=0 + echo "🖳 Console session (no X detected)" +fi + +if [[ "$use_xhost" -eq 1 ]]; then + xhost +SI:localuser:"$subu_user" +fi +if [[ "$use_xauth" -eq 1 ]]; then + mkdir -p "$(dirname "$subu_Xauthority_path")" + touch "$subu_Xauthority_path" + xauth extract "$subu_Xauthority_path" "$DISPLAY" +fi + +if $has_display; then + + + sudo machinectl shell "$subu_user"@ /bin/bash -c " + + # --- session env from parent --- + export DISPLAY=\"${DISPLAY:-${WAYLAND_DISPLAY}}\"; + export XDG_RUNTIME_DIR='/run/user/$(id -u "$subu_user")'; + export XDG_SESSION_TYPE=\"$XDG_SESSION_TYPE\"; + export XDG_SESSION_CLASS=\"user\"; + export XDG_DATA_DIRS=\"/usr/share/gnome:/usr/local/share/:/usr/share/\"; + export USE_XAUTH=$use_xauth + + # Only set XAUTHORITY when we actually prepared it (Wayland/xauth case) + if [[ \"\$USE_XAUTH\" -eq 1 ]]; then + export XAUTHORITY=\"$subu_Xauthority_path\" + fi + + if command -v /usr/bin/gnome-keyring-daemon &>/dev/null; then + eval \$(/usr/bin/gnome-keyring-daemon --start) + export GNOME_KEYRING_CONTROL GNOME_KEYRING_PID + fi + + # WirePlumber: ignore logind (subuser isn't the active seat) + systemctl --user set-environment WIREPLUMBER_DISABLE_PLUGINS=logind + systemctl --user import-environment DISPLAY XAUTHORITY WAYLAND_DISPLAY XDG_RUNTIME_DIR XDG_SESSION_TYPE + + # Bring up audio (sockets first, then services) + systemctl --user enable --now pipewire.socket pipewire-pulse.socket >/dev/null 2>&1 || true + systemctl --user restart 
def CLI(argv=None) -> int:
  """Parse the subu command line and dispatch to the worker API in `core`.

  argv: argument list to parse; defaults to sys.argv[1:].
  Returns a process exit status: 0 on success, 1 on a caught error,
  2 on a usage error.

  Fixes over the previous revision:
    - `argv or sys.argv[1:]` silently fell back to the process arguments
      when an explicit empty list was passed; only None triggers the
      fallback now.
    - The WG and option subparsers declared a positional named "verb",
      which clobbered the top-level `add_subparsers(dest="verb")` value,
      so `ns.verb == "WG"` / `ns.verb == "option"` were unreachable.  The
      positionals are renamed (metavar kept) and dispatch reads them.
    - `exec` used `add_argument("--")`, which argparse treats as a
      malformed optional; it is now a REMAINDER positional and a single
      leading "--" is stripped by the handler.
  """
  argv = sys.argv[1:] if argv is None else argv
  if not argv:
    print(USAGE)
    return 0

  # simple verbs that bypass argparse (so `help/version/example` always work)
  simple = {"help": HELP, "--help": HELP, "-h": HELP, "usage": USAGE, "example": EXAMPLE, "version": VERSION}
  if argv[0] in simple:
    out = simple[argv[0]]
    print(out if isinstance(out, str) else out())
    return 0

  p = argparse.ArgumentParser(prog="subu", add_help=False)
  p.add_argument("-V", "--Version", action="store_true", help="print version")
  sub = p.add_subparsers(dest="verb")

  # init
  ap = sub.add_parser("init")
  ap.add_argument("token", nargs="?")

  # create/list/info
  ap = sub.add_parser("create")
  ap.add_argument("owner")
  ap.add_argument("name")

  sub.add_parser("list")
  ap = sub.add_parser("info"); ap.add_argument("subu_id")
  ap = sub.add_parser("information"); ap.add_argument("subu_id")

  # lo
  ap = sub.add_parser("lo")
  ap.add_argument("state", choices=["up","down"])
  ap.add_argument("subu_id")

  # WG — the sub-verb must NOT use dest "verb" (it would clobber the
  # top-level subparser dest); metavar keeps the help text unchanged.
  ap = sub.add_parser("WG")
  ap.add_argument("wg_verb", metavar="verb",
                  choices=["global","create","server_provided_public_key","info","information","up","down"])
  ap.add_argument("arg1", nargs="?")
  ap.add_argument("arg2", nargs="?")

  # attach/detach
  ap = sub.add_parser("attach")
  ap.add_argument("what", choices=["WG"])
  ap.add_argument("subu_id")
  ap.add_argument("wg_id")

  ap = sub.add_parser("detach")
  ap.add_argument("what", choices=["WG"])
  ap.add_argument("subu_id")

  # network
  ap = sub.add_parser("network")
  ap.add_argument("state", choices=["up","down"])
  ap.add_argument("subu_id")

  # option — same dest-collision fix as WG
  ap = sub.add_parser("option")
  ap.add_argument("opt_verb", metavar="verb", choices=["set","get","list"])
  ap.add_argument("subu_id")
  ap.add_argument("name", nargs="?")
  ap.add_argument("value", nargs="?")

  # exec — everything after subu_id is the command to run
  ap = sub.add_parser("exec")
  ap.add_argument("subu_id")
  ap.add_argument("cmd", nargs=argparse.REMAINDER)

  ns = p.parse_args(argv)
  if ns.Version:
    print(VERSION); return 0

  try:
    if ns.verb == "init":
      return core.cmd_init(ns.token)

    if ns.verb == "create":
      core.create_subu(ns.owner, ns.name); return 0
    if ns.verb == "list":
      core.list_subu(); return 0
    if ns.verb in ("info","information"):
      core.info_subu(ns.subu_id); return 0

    if ns.verb == "lo":
      core.lo_toggle(ns.subu_id, ns.state); return 0

    if ns.verb == "WG":
      v = ns.wg_verb
      if v in ("info","information") and ns.arg1 is None:
        print("WG info requires WG_ID"); return 2
      if v == "global":
        core.wg_global(ns.arg1); return 0
      if v == "create":
        wid = core.wg_create(ns.arg1); print(wid); return 0
      if v == "server_provided_public_key":
        core.wg_set_pubkey(ns.arg1, ns.arg2); return 0
      if v in ("info","information"):
        core.wg_info(ns.arg1); return 0
      if v == "up":
        core.wg_up(ns.arg1); return 0
      if v == "down":
        core.wg_down(ns.arg1); return 0

    if ns.verb == "attach":
      if ns.what == "WG":
        core.attach_wg(ns.subu_id, ns.wg_id); return 0

    if ns.verb == "detach":
      if ns.what == "WG":
        core.detach_wg(ns.subu_id); return 0

    if ns.verb == "network":
      core.network_toggle(ns.subu_id, ns.state); return 0

    if ns.verb == "option":
      if ns.opt_verb == "set":
        core.option_set(ns.subu_id, ns.name, ns.value); return 0
      if ns.opt_verb == "get":
        core.option_get(ns.subu_id, ns.name); return 0
      if ns.opt_verb == "list":
        core.option_list(ns.subu_id); return 0

    if ns.verb == "exec":
      # argparse may leave the "--" separator in the REMAINDER; drop one.
      cmd = ns.cmd[1:] if ns.cmd and ns.cmd[0] == "--" else ns.cmd
      if not cmd:
        print("subu exec -- ..."); return 2
      core.exec_in_subu(ns.subu_id, cmd); return 0

    print(USAGE); return 2
  except Exception as e:
    print(f"error: {e}")
    return 1
(__u32)(bpf_get_current_uid_gid() & 0xffffffff); + // if (uid != 2017) return -1; + return 0; +} + +// Hook: cgroup/connect4 — runs before connect(2) proceeds +SEC("cgroup/connect4") +int subu_connect4(struct bpf_sock_addr *ctx) +{ + if (allow_uid(ctx) < 0) return -1; + // Future: read pinned map/meta, set SO_* via bpf_setsockopt when permitted + return 0; +} + +// Hook: cgroup/post_bind4 — runs after a local bind is chosen +SEC("cgroup/post_bind4") +int subu_post_bind4(struct bpf_sock *sk) +{ + // Future: enforce bound dev if kernel helper allows; record tgid->ifindex + __u32 tgid = bpf_get_current_pid_tgid() >> 32; + __u32 val = 0; + bpf_map_update_elem(&subu_tgid2if, &tgid, &val, BPF_ANY); + return 0; +} diff --git a/developer/manager/core.py b/developer/manager/core.py new file mode 100644 index 0000000..c363ec2 --- /dev/null +++ b/developer/manager/core.py @@ -0,0 +1,254 @@ +# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*- +""" +core.py — worker API for subu manager +Version: 0.2.0 +""" +import os, sqlite3, subprocess +from pathlib import Path +from contextlib import closing +from text import VERSION +from worker_bpf import ensure_mounts, install_steering, remove_steering, BpfError + +DB_FILE = Path("./subu.db") +WG_GLOBAL_FILE = Path("./WG_GLOBAL") + +def run(cmd, check=True): + r = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + if check and r.returncode != 0: + raise RuntimeError(f"cmd failed: {' '.join(cmd)}\n{r.stderr}") + return r.stdout.strip() + +# ---------------- DB ---------------- +def _db(): + if not DB_FILE.exists(): + raise FileNotFoundError("subu.db not found; run `subu init ` first") + return sqlite3.connect(DB_FILE) + +def cmd_init(token: str|None): + if DB_FILE.exists(): + raise FileExistsError("db already exists") + if not token or len(token) < 6: + raise ValueError("init requires a 6+ char token") + with closing(sqlite3.connect(DB_FILE)) as db: + c = db.cursor() + 
c.executescript(""" + CREATE TABLE subu ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + owner TEXT, + name TEXT, + netns TEXT, + lo_state TEXT DEFAULT 'down', + wg_id INTEGER, + network_state TEXT DEFAULT 'down' + ); + CREATE TABLE wg ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + endpoint TEXT, + local_ip TEXT, + allowed_ips TEXT, + pubkey TEXT, + state TEXT DEFAULT 'down' + ); + CREATE TABLE options ( + subu_id INTEGER, + name TEXT, + value TEXT, + PRIMARY KEY (subu_id, name) + ); + """) + db.commit() + print(f"created subu.db (v{VERSION})") + +# ------------- Subu ops ------------- +def create_subu(owner: str, name: str) -> str: + with closing(_db()) as db: + c = db.cursor() + subu_netns = f"ns-subu_tmp" # temp; we rename after ID known + c.execute("INSERT INTO subu (owner, name, netns) VALUES (?, ?, ?)", + (owner, name, subu_netns)) + sid = c.lastrowid + netns = f"ns-subu_{sid}" + c.execute("UPDATE subu SET netns=? WHERE id=?", (netns, sid)) + db.commit() + + # create netns + run(["ip", "netns", "add", netns]) + run(["ip", "-n", netns, "link", "set", "lo", "down"]) + print(f"Created subu_{sid} ({owner}:{name}) with netns {netns}") + return f"subu_{sid}" + +def list_subu(): + with closing(_db()) as db: + for row in db.execute("SELECT id, owner, name, netns, lo_state, wg_id, network_state FROM subu"): + print(row) + +def info_subu(subu_id: str): + sid = int(subu_id.split("_")[1]) + with closing(_db()) as db: + row = db.execute("SELECT * FROM subu WHERE id=?", (sid,)).fetchone() + if not row: + print("not found"); return + print(row) + wg = db.execute("SELECT wg_id FROM subu WHERE id=?", (sid,)).fetchone()[0] + if wg is not None: + wrow = db.execute("SELECT * FROM wg WHERE id=?", (wg,)).fetchone() + print("WG:", wrow) + opts = db.execute("SELECT name,value FROM options WHERE subu_id=?", (sid,)).fetchall() + print("Options:", opts) + +def lo_toggle(subu_id: str, state: str): + sid = int(subu_id.split("_")[1]) + with closing(_db()) as db: + ns = db.execute("SELECT netns 
FROM subu WHERE id=?", (sid,)).fetchone() + if not ns: raise ValueError("subu not found") + ns = ns[0] + run(["ip", "netns", "exec", ns, "ip", "link", "set", "lo", state]) + db.execute("UPDATE subu SET lo_state=? WHERE id=?", (state, sid)) + db.commit() + print(f"{subu_id}: lo {state}") + +# ------------- WG ops --------------- +def wg_global(basecidr: str): + WG_GLOBAL_FILE.write_text(basecidr.strip()+"\n") + print(f"WG pool base = {basecidr}") + +def _alloc_ip(idx: int, base: str) -> str: + # simplistic /24 allocator: base must be x.y.z.0/24 + prefix = base.split("/")[0].rsplit(".", 1)[0] + host = 2 + idx + return f"{prefix}.{host}/32" + +def wg_create(endpoint: str) -> str: + if not WG_GLOBAL_FILE.exists(): + raise RuntimeError("set WG base with `subu WG global ` first") + base = WG_GLOBAL_FILE.read_text().strip() + with closing(_db()) as db: + c = db.cursor() + idx = c.execute("SELECT COUNT(*) FROM wg").fetchone()[0] + local_ip = _alloc_ip(idx, base) + c.execute("INSERT INTO wg (endpoint, local_ip, allowed_ips) VALUES (?, ?, ?)", + (endpoint, local_ip, "0.0.0.0/0")) + wid = c.lastrowid + db.commit() + print(f"WG_{wid} endpoint={endpoint} ip={local_ip}") + return f"WG_{wid}" + +def wg_set_pubkey(wg_id: str, key: str): + wid = int(wg_id.split("_")[1]) + with closing(_db()) as db: + db.execute("UPDATE wg SET pubkey=? WHERE id=?", (key, wid)) + db.commit() + print("ok") + +def wg_info(wg_id: str): + wid = int(wg_id.split("_")[1]) + with closing(_db()) as db: + row = db.execute("SELECT * FROM wg WHERE id=?", (wid,)).fetchone() + print(row if row else "not found") + +def wg_up(wg_id: str): + wid = int(wg_id.split("_")[1]) + # Admin-up of WG device handled via network_toggle once attached. 
+ print(f"{wg_id}: up (noop until attached)") + +def wg_down(wg_id: str): + wid = int(wg_id.split("_")[1]) + print(f"{wg_id}: down (noop until attached)") + +# ---------- attach/detach + BPF ---------- +def attach_wg(subu_id: str, wg_id: str): + ensure_mounts() + sid = int(subu_id.split("_")[1]); wid = int(wg_id.split("_")[1]) + with closing(_db()) as db: + r = db.execute("SELECT netns FROM subu WHERE id=?", (sid,)).fetchone() + if not r: raise ValueError("subu not found") + ns = r[0] + w = db.execute("SELECT endpoint, local_ip, pubkey FROM wg WHERE id=?", (wid,)).fetchone() + if not w: raise ValueError("WG not found") + endpoint, local_ip, pubkey = w + + ifname = f"subu_{wid}" + # create WG link in init ns, move to netns + run(["ip", "link", "add", ifname, "type", "wireguard"]) + run(["ip", "link", "set", ifname, "netns", ns]) + run(["ip", "-n", ns, "addr", "add", local_ip, "dev", ifname], check=False) + run(["ip", "-n", ns, "link", "set", "dev", ifname, "mtu", "1420"]) + run(["ip", "-n", ns, "link", "set", "dev", ifname, "down"]) # keep engine down until `network up` + + # install steering (MVP: create cgroup + attach bpf program) + try: + install_steering(subu_id, ns, ifname) + print(f"{subu_id}: eBPF steering installed -> {ifname}") + except BpfError as e: + print(f"{subu_id}: steering warning: {e}") + + with closing(_db()) as db: + db.execute("UPDATE subu SET wg_id=? 
WHERE id=?", (wid, sid)) + db.commit() + print(f"attached {wg_id} to {subu_id} in {ns} as {ifname}") + +def detach_wg(subu_id: str): + ensure_mounts() + sid = int(subu_id.split("_")[1]) + with closing(_db()) as db: + r = db.execute("SELECT netns,wg_id FROM subu WHERE id=?", (sid,)).fetchone() + if not r: print("not found"); return + ns, wid = r + if wid is None: + print("nothing attached"); return + ifname = f"subu_{wid}" + run(["ip", "-n", ns, "link", "del", ifname], check=False) + try: + remove_steering(subu_id) + except BpfError as e: + print(f"steering remove warn: {e}") + with closing(_db()) as db: + db.execute("UPDATE subu SET wg_id=NULL WHERE id=?", (sid,)) + db.commit() + print(f"detached WG_{wid} from {subu_id}") + +# ------------- network up/down ------------- +def network_toggle(subu_id: str, state: str): + sid = int(subu_id.split("_")[1]) + with closing(_db()) as db: + ns, wid = db.execute("SELECT netns,wg_id FROM subu WHERE id=?", (sid,)).fetchone() + # always make sure lo up on 'up' + if state == "up": + run(["ip", "netns", "exec", ns, "ip", "link", "set", "lo", "up"], check=False) + if wid is not None: + ifname = f"subu_{wid}" + run(["ip", "-n", ns, "link", "set", "dev", ifname, state], check=False) + with closing(_db()) as db: + db.execute("UPDATE subu SET network_state=? WHERE id=?", (state, sid)) + db.commit() + print(f"{subu_id}: network {state}") + +# ------------- options ---------------- +def option_set(subu_id: str, name: str, value: str): + sid = int(subu_id.split("_")[1]) + with closing(_db()) as db: + db.execute("INSERT INTO options (subu_id,name,value) VALUES(?,?,?) " + "ON CONFLICT(subu_id,name) DO UPDATE SET value=excluded.value", + (sid, name, value)) + db.commit() + print("ok") + +def option_get(subu_id: str, name: str): + sid = int(subu_id.split("_")[1]) + with closing(_db()) as db: + row = db.execute("SELECT value FROM options WHERE subu_id=? 
AND name=?", (sid,name)).fetchone() + print(row[0] if row else "") + +def option_list(subu_id: str): + sid = int(subu_id.split("_")[1]) + with closing(_db()) as db: + rows = db.execute("SELECT name,value FROM options WHERE subu_id=?", (sid,)).fetchall() + for n,v in rows: + print(f"{n}={v}") + +# ------------- exec ------------------- +def exec_in_subu(subu_id: str, cmd: list): + sid = int(subu_id.split("_")[1]) + with closing(_db()) as db: + ns = db.execute("SELECT netns FROM subu WHERE id=?", (sid,)).fetchone()[0] + os.execvp("ip", ["ip","netns","exec", ns] + cmd) diff --git a/developer/manager/subu b/developer/manager/subu new file mode 120000 index 0000000..45a8ec1 --- /dev/null +++ b/developer/manager/subu @@ -0,0 +1 @@ +CLI.py \ No newline at end of file diff --git a/developer/manager/temp.sh b/developer/manager/temp.sh new file mode 100644 index 0000000..36855b6 --- /dev/null +++ b/developer/manager/temp.sh @@ -0,0 +1,40 @@ +# from: /home/Thomas/subu_data/developer/project/active/subu/developer/source/manager + +set -euo pipefail + +echo "== 1) Backup legacy-prefixed modules ==" +mkdir -p _old_prefixed +for f in subu_*.py; do + [ -f "$f" ] && mv -v "$f" _old_prefixed/ +done +[ -f subu_worker_bpf.py ] && mv -v subu_worker_bpf.py _old_prefixed/ || true + +echo "== 2) Ensure only the new module names remain ==" +# Keep these (already present in your tar): +# CLI.py core.py text.py worker_bpf.py bpf_force_egress.c +ls -1 + +echo "== 3) Make CLI runnable as 'subu' ==" +# Make sure CLI has a shebang; add if missing +if ! 
head -n1 CLI.py | grep -q '^#!/usr/bin/env python3'; then + (printf '%s\n' '#!/usr/bin/env python3' ; cat CLI.py) > .CLI.tmp && mv .CLI.tmp CLI.py +fi +chmod +x CLI.py +ln -sf CLI.py subu +chmod +x subu + +echo "== 4) Quick import sanity ==" +# Fail if any of the remaining files still import the old module names +bad=$(grep -R --line-number -E 'import +subu_|from +subu_' -- *.py || true) +if [ -n "$bad" ]; then + echo "Found old-style imports; please fix:" >&2 + echo "$bad" >&2 + exit 1 +fi + +echo "== 5) Show version and help ==" +./subu version || true +./subu help || true +./subu || true # should print usage by default + +echo "== Done. If this looks good, you can delete _old_prefixed when ready. ==" diff --git a/developer/manager/test.sh b/developer/manager/test.sh new file mode 100644 index 0000000..706250b --- /dev/null +++ b/developer/manager/test.sh @@ -0,0 +1,13 @@ +#!/bin/env bash + +set -x +./CLI # -> USAGE (exit 0) +./CLI usage # -> USAGE +./CLI -h # -> HELP +./CLI --help # -> HELP +./CLI help # -> HELP +./CLI help WG # -> WG topic help (or full HELP if topic unknown) +./CLI example # -> EXAMPLE +./CLI version # -> 0.1.4 +./CLI -V # -> 0.1.4 + diff --git a/developer/manager/test_0.sh b/developer/manager/test_0.sh new file mode 100755 index 0000000..ac354d3 --- /dev/null +++ b/developer/manager/test_0.sh @@ -0,0 +1,11 @@ +set -x +./subu.py # -> USAGE (exit 0) +./subu.py usage # -> USAGE +./subu.py -h # -> HELP +./subu.py --help # -> HELP +./subu.py help # -> HELP +./subu.py help WG # -> WG topic help (or full HELP if topic unknown) +./subu.py example # -> EXAMPLE +./subu.py version # -> 0.1.4 +./subu.py -V # -> 0.1.4 +set +x diff --git a/developer/manager/test_0_expected.sh b/developer/manager/test_0_expected.sh new file mode 100644 index 0000000..8e31ed5 --- /dev/null +++ b/developer/manager/test_0_expected.sh @@ -0,0 +1,353 @@ +++ ./subu.py +usage: subu [-V] [] + +Quick verbs: + usage Show this usage summary + help [topic] Detailed help; same as -h / 
--help + example End-to-end example session + version Print version + +Main verbs: + init Initialize a new subu database (refuses if it exists) + create Create a minimal subu record (defaults only) + info | information Show details for a subu + WG WireGuard object operations + attach Attach a WG object to a subu (netns + cgroup/eBPF) + detach Detach WG from a subu + network Bring all attached ifaces up/down inside the subu netns + lo Bring loopback up/down inside the subu netns + option Persisted options (list/set/get for future policy) + exec Run a command inside the subu netns + +Tip: `subu help` (or `subu --help`) shows detailed help; `subu help WG` shows topic help. +++ ./subu.py usage +usage: subu [-V] [] + +Quick verbs: + usage Show this usage summary + help [topic] Detailed help; same as -h / --help + example End-to-end example session + version Print version + +Main verbs: + init Initialize a new subu database (refuses if it exists) + create Create a minimal subu record (defaults only) + info | information Show details for a subu + WG WireGuard object operations + attach Attach a WG object to a subu (netns + cgroup/eBPF) + detach Detach WG from a subu + network Bring all attached ifaces up/down inside the subu netns + lo Bring loopback up/down inside the subu netns + option Persisted options (list/set/get for future policy) + exec Run a command inside the subu netns + +Tip: `subu help` (or `subu --help`) shows detailed help; `subu help WG` shows topic help. +++ ./subu.py -h +subu — manage subu containers, namespaces, and WG attachments + +2.1 Core + + subu init + Create ./subu.db (tables: subu, wg, links, options, state). + Requires a 6-char token (e.g., dzkq7b). Refuses if DB already exists. + + subu create + Make a default subu with netns ns- containing lo only (down). + Returns subu_N. + + subu list + Columns: Subu_ID, Owner, Name, NetNS, WG_Attached?, Up/Down, Steer? 
+ + subu info | subu information + Full record + attached WG(s) + options + iface states. + +2.2 Loopback + + subu lo up | subu lo down + Toggle loopback inside the subu’s netns. + +2.3 WireGuard objects (independent) + + subu WG global + e.g., 192.168.112.0/24; allocator hands out /32 peers sequentially. + Shows current base and next free on success. + + subu WG create + Creates WG object; allocates next /32 local IP; AllowedIPs=0.0.0.0/0. + Returns WG_M. + + subu WG server_provided_public_key + Stores server’s pubkey. + + subu WG info | subu WG information + Endpoint, allocated IP, pubkey set?, link state (admin/oper). + +2.4 Link WG ↔ subu, bring up/down + + subu attach WG + Creates/configures WG device inside ns-: + - device name: subu_ (M from WG_ID) + - set local /32, MTU 1420, accept_local=1 + - (no default route is added — steering uses eBPF) + - v1: enforce one WG per Subu; error if another attached + + subu detach WG + Remove WG device/config from the subu’s netns; keep WG object. + + subu WG up | subu WG down + Toggle interface admin state in the subu’s netns (must be attached). + + subu network up | subu network down + Only toggles admin state for all attached ifaces. On “up”, loopback + is brought up first automatically. No route manipulation. + +2.5 Execution & (future) steering + + subu exec -- … + Run a process inside the subu’s netns. + + subu steer enable | subu steer disable + (Future) Attach/detach eBPF cgroup programs to force SO_BINDTOIFINDEX=subu_ + for TCP/UDP. Default: disabled. + +2.6 Options (persist only, for future policy) + + subu option list + subu option get [name] + subu option set + +2.7 Meta + + subu usage + Short usage summary (also printed when no args are given). + + subu help [topic] + This help (or per-topic help such as `subu help WG`). + + subu example + A concrete end-to-end scenario. + + subu version + Print version (same as -V / --version). 
+++ ./subu.py --help +subu — manage subu containers, namespaces, and WG attachments + +2.1 Core + + subu init + Create ./subu.db (tables: subu, wg, links, options, state). + Requires a 6-char token (e.g., dzkq7b). Refuses if DB already exists. + + subu create + Make a default subu with netns ns- containing lo only (down). + Returns subu_N. + + subu list + Columns: Subu_ID, Owner, Name, NetNS, WG_Attached?, Up/Down, Steer? + + subu info | subu information + Full record + attached WG(s) + options + iface states. + +2.2 Loopback + + subu lo up | subu lo down + Toggle loopback inside the subu’s netns. + +2.3 WireGuard objects (independent) + + subu WG global + e.g., 192.168.112.0/24; allocator hands out /32 peers sequentially. + Shows current base and next free on success. + + subu WG create + Creates WG object; allocates next /32 local IP; AllowedIPs=0.0.0.0/0. + Returns WG_M. + + subu WG server_provided_public_key + Stores server’s pubkey. + + subu WG info | subu WG information + Endpoint, allocated IP, pubkey set?, link state (admin/oper). + +2.4 Link WG ↔ subu, bring up/down + + subu attach WG + Creates/configures WG device inside ns-: + - device name: subu_ (M from WG_ID) + - set local /32, MTU 1420, accept_local=1 + - (no default route is added — steering uses eBPF) + - v1: enforce one WG per Subu; error if another attached + + subu detach WG + Remove WG device/config from the subu’s netns; keep WG object. + + subu WG up | subu WG down + Toggle interface admin state in the subu’s netns (must be attached). + + subu network up | subu network down + Only toggles admin state for all attached ifaces. On “up”, loopback + is brought up first automatically. No route manipulation. + +2.5 Execution & (future) steering + + subu exec -- … + Run a process inside the subu’s netns. + + subu steer enable | subu steer disable + (Future) Attach/detach eBPF cgroup programs to force SO_BINDTOIFINDEX=subu_ + for TCP/UDP. Default: disabled. 
+ +2.6 Options (persist only, for future policy) + + subu option list + subu option get [name] + subu option set + +2.7 Meta + + subu usage + Short usage summary (also printed when no args are given). + + subu help [topic] + This help (or per-topic help such as `subu help WG`). + + subu example + A concrete end-to-end scenario. + + subu version + Print version (same as -V / --version). +++ ./subu.py help +subu — manage subu containers, namespaces, and WG attachments + +2.1 Core + + subu init + Create ./subu.db (tables: subu, wg, links, options, state). + Requires a 6-char token (e.g., dzkq7b). Refuses if DB already exists. + + subu create + Make a default subu with netns ns- containing lo only (down). + Returns subu_N. + + subu list + Columns: Subu_ID, Owner, Name, NetNS, WG_Attached?, Up/Down, Steer? + + subu info | subu information + Full record + attached WG(s) + options + iface states. + +2.2 Loopback + + subu lo up | subu lo down + Toggle loopback inside the subu’s netns. + +2.3 WireGuard objects (independent) + + subu WG global + e.g., 192.168.112.0/24; allocator hands out /32 peers sequentially. + Shows current base and next free on success. + + subu WG create + Creates WG object; allocates next /32 local IP; AllowedIPs=0.0.0.0/0. + Returns WG_M. + + subu WG server_provided_public_key + Stores server’s pubkey. + + subu WG info | subu WG information + Endpoint, allocated IP, pubkey set?, link state (admin/oper). + +2.4 Link WG ↔ subu, bring up/down + + subu attach WG + Creates/configures WG device inside ns-: + - device name: subu_ (M from WG_ID) + - set local /32, MTU 1420, accept_local=1 + - (no default route is added — steering uses eBPF) + - v1: enforce one WG per Subu; error if another attached + + subu detach WG + Remove WG device/config from the subu’s netns; keep WG object. + + subu WG up | subu WG down + Toggle interface admin state in the subu’s netns (must be attached). 
+ + subu network up | subu network down + Only toggles admin state for all attached ifaces. On “up”, loopback + is brought up first automatically. No route manipulation. + +2.5 Execution & (future) steering + + subu exec -- … + Run a process inside the subu’s netns. + + subu steer enable | subu steer disable + (Future) Attach/detach eBPF cgroup programs to force SO_BINDTOIFINDEX=subu_ + for TCP/UDP. Default: disabled. + +2.6 Options (persist only, for future policy) + + subu option list + subu option get [name] + subu option set + +2.7 Meta + + subu usage + Short usage summary (also printed when no args are given). + + subu help [topic] + This help (or per-topic help such as `subu help WG`). + + subu example + A concrete end-to-end scenario. + + subu version + Print version (same as -V / --version). +++ ./subu.py help WG +usage: subu WG [-h] + +options: + -h, --help show this help message and exit +++ ./subu.py example +# 0) Safe init (refuses if ./subu.db exists) +subu init dzkq7b +# -> created ./subu.db + +# 1) Create Subu +subu create Thomas US +# -> Subu_ID: subu_7 +# -> netns: ns-subu_7 with lo (down) + +# 2) Define WG pool (once per host) +subu WG global 192.168.112.0/24 +# -> base set; next free: 192.168.112.2/32 + +# 3) Create WG object with endpoint +subu WG create ReasoningTechnology.com:51820 +# -> WG_ID: WG_0 +# -> local IP: 192.168.112.2/32 +# -> AllowedIPs: 0.0.0.0/0 + +# 4) Add server public key +subu WG server_provided_public_key WG_0 ABCDEFG...xyz= +# -> saved + +# 5) Attach WG to Subu (device created/configured in ns) +subu attach WG subu_7 WG_0 +# -> device ns-subu_7/subu_0 configured (no default route) + +# 6) Bring network up (lo first, then attached ifaces) +subu network up subu_7 +# -> lo up; subu_0 admin up + +# 7) Start the WG engine inside the netns +subu WG up WG_0 +# -> up, handshakes should start + +# 8) Test from inside the subu +subu exec subu_7 -- curl -4v https://ifconfig.me +++ ./subu.py version +0.1.3 +++ ./subu.py -V +0.1.3 +++ 
set +x diff --git a/developer/manager/text.py b/developer/manager/text.py new file mode 100644 index 0000000..84f6762 --- /dev/null +++ b/developer/manager/text.py @@ -0,0 +1,109 @@ +# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*- +VERSION = "0.2.0" + +USAGE = """\ +subu — Subu manager (v0.2.0) + +Usage: + subu # usage + subu help # detailed help + subu example # example workflow + subu version # print version + + subu init + subu create + subu list + subu info | subu information + + subu lo up|down + + subu WG global + subu WG create + subu WG server_provided_public_key + subu WG info|information + subu WG up + subu WG down + + subu attach WG + subu detach WG + + subu network up|down + + subu option set + subu option get + subu option list + + subu exec -- ... +""" + +HELP = """\ +Subu manager (v0.2.0) + +1) Init + subu init + Creates ./subu.db. Refuses to run if db exists. + +2) Subu + subu create + subu list + subu info + +3) Loopback + subu lo up|down + +4) WireGuard objects (independent of subu) + subu WG global # e.g., 192.168.112.0/24 + subu WG create # allocates next /32 + subu WG server_provided_public_key + subu WG info + subu WG up / subu WG down # admin toggle after attached + +5) Attach/detach + eBPF steering + subu attach WG + - Creates WG dev as subu_ inside ns-subu_, assigns /32, MTU 1420 + - Installs per-subu cgroup + loads eBPF scaffold (UID check, metadata map) + - Keeps device admin-down until `subu network up` + subu detach WG + - Deletes device, removes cgroup + BPF + +6) Network aggregate + subu network up|down + - Ensures lo up on 'up', toggles attached WG ifaces + +7) Options + subu option set|get|list ... + +8) Exec + subu exec -- ... 
+""" + +EXAMPLE = """\ +# 0) Init +subu init dzkq7b + +# 1) Create Subu +subu create Thomas US +# -> subu_1 + +# 2) WG pool once +subu WG global 192.168.112.0/24 + +# 3) Create WG object with endpoint +subu WG create ReasoningTechnology.com:51820 +# -> WG_1 + +# 4) Pubkey (placeholder) +subu WG server_provided_public_key WG_1 ABCDEFG...xyz= + +# 5) Attach device and install cgroup+BPF steering +subu attach WG subu_1 WG_1 + +# 6) Bring network up (lo + WG) +subu network up subu_1 + +# 7) Test inside ns +subu exec subu_1 -- curl -4v https://ifconfig.me +""" + +def VERSION_string(): + return VERSION diff --git a/developer/manager/worker_bpf.py b/developer/manager/worker_bpf.py new file mode 100644 index 0000000..96aef14 --- /dev/null +++ b/developer/manager/worker_bpf.py @@ -0,0 +1,78 @@ +# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*- +""" +worker_bpf.py — create per-subu cgroups and load eBPF (MVP) +Version: 0.2.0 +""" +import os, subprocess, json +from pathlib import Path + +class BpfError(RuntimeError): pass + +def run(cmd, check=True): + r = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + if check and r.returncode != 0: + raise BpfError(f"cmd failed: {' '.join(cmd)}\n{r.stderr}") + return r.stdout.strip() + +def ensure_mounts(): + # ensure bpf and cgroup v2 are mounted + try: + Path("/sys/fs/bpf").mkdir(parents=True, exist_ok=True) + run(["mount","-t","bpf","bpf","/sys/fs/bpf"], check=False) + except Exception: + pass + try: + Path("/sys/fs/cgroup").mkdir(parents=True, exist_ok=True) + run(["mount","-t","cgroup2","none","/sys/fs/cgroup"], check=False) + except Exception: + pass + +def cgroup_path(subu_id: str) -> str: + return f"/sys/fs/cgroup/{subu_id}" + +def install_steering(subu_id: str, netns: str, ifname: str): + ensure_mounts() + cg = Path(cgroup_path(subu_id)) + cg.mkdir(parents=True, exist_ok=True) + + # compile BPF + obj = Path("./bpf_force_egress.o") + src = 
Path("./bpf_force_egress.c") + if not src.exists(): + raise BpfError("bpf_force_egress.c missing next to manager") + + # Build object (requires clang/llc/bpftool) + run(["clang","-O2","-g","-target","bpf","-c",str(src),"-o",str(obj)]) + + # Load program into bpffs; attach to cgroup/inet4_connect + inet4_post_bind (MVP) + pinned = f"/sys/fs/bpf/{subu_id}_egress" + run(["bpftool","prog","loadall",str(obj),pinned], check=True) + + # Attach to hooks (MVP validation hooks) + # NOTE: these are safe no-ops for now; they validate UID and stash ifindex map. + for hook in ("cgroup/connect4","cgroup/post_bind4"): + run(["bpftool","cgroup","attach",cgroup_path(subu_id),"attach",hook,"pinned",f"{pinned}/prog_0"], check=False) + + # Write metadata for ifname (saved for future prog versions) + meta = {"ifname": ifname} + Path(f"/sys/fs/bpf/{subu_id}_meta.json").write_text(json.dumps(meta)) + +def remove_steering(subu_id: str): + cg = cgroup_path(subu_id) + # Detach whatever is attached + for hook in ("cgroup/connect4","cgroup/post_bind4"): + subprocess.run(["bpftool","cgroup","detach",cg,"detach",hook], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + # Remove pinned prog dir + pinned = Path(f"/sys/fs/bpf/{subu_id}_egress") + if pinned.exists(): + subprocess.run(["bpftool","prog","detach",str(pinned)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + try: + for p in pinned.glob("*"): p.unlink() + pinned.rmdir() + except Exception: + pass + # Remove cgroup dir + try: + Path(cg).rmdir() + except Exception: + pass diff --git a/developer/mount/README.org b/developer/mount/README.org new file mode 100644 index 0000000..73fd09f --- /dev/null +++ b/developer/mount/README.org @@ -0,0 +1,149 @@ + +When the master users are not remote mounted, but rather have static home directories on the local machine, these scripts are not needed. 
+ +masu == master user +subu == sub user + +These 'mount' scripts are for keeping master users on a remote, optionally encrypted, device. + +I have one example running, so there might be generalization issues with these scripts. + +The order and type of the command arguments are the first part of each +command name. The command description follows. For example: + +`map_name__mounted_masu_list.sh` + +is given a `map_name` and provides a list of the masu found at the mount point with that name. (Note, that /dev/mapper/ is mounted at /mnt/.) + + +For mounting a remote device. + +The remote device has at the top level master user home directories. Each master user home directory has a sub directory called subu_data. Each directory in subu_data is owned by a sub-user, and is the home directory for that sub-user. Each master user also has a `subu` sub-directory. The contents of the subu directory will parallel that of the subu_data directory, but differ in that the contained files will be owned by the master user. + +1. the remote device is mounted under /mnt/map_name + + `device_mapname__open_mount.sh` ` + + `mapname` is the name that appears under /dev/mapper, it is also used + as the /mnt/ mount point. + + if is not to be opened with cryptsetup, instead mount it to the mount pt directly. + +2. Each master user is mount --bind from the /mnt/ to /home + + masu__to_home.sh + + There is currently no script to map bind all the masu in one call. + +3. master users have subu home directories under /home//subu_data. These + are owned by the subu users. Map the /home//subu_data directories to + `/home/subu`n using + + masu__map_own_all.sh + + The `/home/subu` directories are bindfs mapped to be be owned by the master + user, so that the master user may administer them. + + + +* bindfs problem + + On my Fedora install once it was bound by root, it stayed there. 
On + Debian it keeps losing the mounts, for sure on a logout or login, + and maybe other times also --- the machinectl logins and logouts I + bet that is what is doing it. Hence very time I go to use a mount + in subu it is not there or filled with the dreaded question mark + garbage, has to be umount then remounted. + + -------------------------------------------------------------------------------- + You're exactly right in your diagnosis: Debian's behavior under + systemd with machinectl is unmounting user bind mounts on + login/logout cycles—especially when using enable-linger, systemd + --user, or machinectl shell. + + This differs from Fedora, which preserves those mounts more consistently unless explicitly torn down. Let’s first address why it’s happening, then provide a robust, recoverable fix. + 🔍 Why This Happens + + When machinectl or loginctl manages a user session: + + systemd creates a private mount namespace per user. + + Any mounts (especially bind mounts) not made within that + namespace or not managed by systemd itself (i.e. not via .mount + units) can be: + + Torn down on logout, + + Hidden due to namespace isolation, + + Disconnected if user lingering is toggled. + + If your bindfs mount is outside the user’s namespace (i.e. mounted by root), but then accessed inside a user-managed session, systemd may unmount it or lose visibility. + ✅ Recommended Fix + + There are two paths: + A. Use a systemd .mount unit (most stable) + + Create a file /etc/systemd/system/home-Thomas-subu-.mount: + + [Unit] + Description=Bind mount subu user home + + [Mount] + What=/home/Thomas/subu_data/ + Where=/home/Thomas/subu/ + Type=none + Options=bind + + [Install] + WantedBy=multi-user.target + + Enable and start it: + + sudo systemctl daemon-reexec + sudo systemctl enable --now home-Thomas-subu-.mount + + Repeat for each subu. + + This ensures systemd manages the bind mount in its native way and it won’t get torn down. + B. 
Remount automatically on login via ~/.bash_profile or a systemd --user unit + + You can modify your script to: + + Skip sudo if already root + + Be idempotent + + Automatically remount on session start + + Then in ~/.bash_profile or ~/.config/systemd/user/subu-mount.service: + + [Unit] + Description=Re-mount subu directory for Thomas + + [Service] + Type=oneshot + ExecStart=/home/Thomas/executable/subu/user_open_subu.sh Thomas US + + [Install] + WantedBy=default.target + + Enable with: + + systemctl --user daemon-reexec + systemctl --user enable --now subu-mount.service + + This guarantees it will come up as part of your login, even after a reboot. + 🔄 Temporary Workaround + + If you want a quick way to check all subu are mounted and remount them if not, write a watchdog script: + + #!/bin/bash + for subu in $(ls /home/Thomas/subu_data); do + mountpoint="/home/Thomas/subu/$subu" + if ! mountpoint -q "$mountpoint"; then + /home/Thomas/executable/subu/user_open_subu.sh Thomas "$subu" + fi + done + + Would you like me to auto-generate the .mount or .service files for each subu? We can script the system-wide setup cleanly. diff --git a/developer/mount/device_mapname__open_mount.sh b/developer/mount/device_mapname__open_mount.sh new file mode 100755 index 0000000..b14ebe3 --- /dev/null +++ b/developer/mount/device_mapname__open_mount.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +if [[ $EUID -ne 0 ]]; then + echo "❌ This script must be run as root." >&2 + exit 1 +fi + +# Function to unlock and mount the device +device_mount() { + local device_node=$1 # e.g., /dev/sdb1 + local device_name=$2 # e.g., Zathustra + local mount_point="/mnt/$device_name" + + # Check if cryptsetup is installed + if ! command -v cryptsetup &> /dev/null; then + echo "Error: cryptsetup is not installed!" + return 1 + fi + + # Check if the device is already mounted + if mount | grep "on $mount_point" > /dev/null; then + echo "Device $device_name is already mounted at $mount_point." 
+ return 0 + fi + + # Make sure the mount point exists + mkdir -p "$mount_point" + + # Unlock the encrypted device + sudo cryptsetup luksOpen "$device_node" "$device_name-crypt" + + # Mount the unlocked device + sudo mount "/dev/mapper/$device_name-crypt" "$mount_point" + + echo "$device_name mounted at $mount_point" +} + +# Run the function with the device node and device name as arguments +device_mount "$1" "$2" diff --git a/developer/mount/device_umount.sh b/developer/mount/device_umount.sh new file mode 100755 index 0000000..3eadc37 --- /dev/null +++ b/developer/mount/device_umount.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +if [[ $EUID -ne 0 ]]; then + echo "❌ This script must be run as root." >&2 + exit 1 +fi + +umount /mnt/"$1" +cryptsetup close "$1" diff --git a/developer/mount/disable_linger.sh b/developer/mount/disable_linger.sh new file mode 100755 index 0000000..a9fffdd --- /dev/null +++ b/developer/mount/disable_linger.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# disable_linger_subu — turn off systemd --user lingering for all -* users +# Usage: sudo disable_linger_subu --masu Thomas + +set -euo pipefail +MASU="" +while [[ $# -gt 0 ]]; do + case "$1" in + --masu) MASU="${2:-}"; shift 2;; + *) echo "unknown arg: $1" >&2; exit 2;; + esac +done +[[ -n "$MASU" ]] || { echo "usage: sudo $0 --masu "; exit 2; } +[[ $EUID -eq 0 ]] || { echo "must run as root"; exit 1; } + +mapfile -t SUBU_USERS < <(getent passwd | awk -F: -v pfx="^${MASU}-" '$1 ~ pfx {print $1}' | sort) +for u in "${SUBU_USERS[@]}"; do + echo "loginctl disable-linger $u" + loginctl disable-linger "$u" || true +done + +echo "Current linger files (should be empty or only intentional users):" +ls -1 /var/lib/systemd/linger 2>/dev/null || echo "(none)" +echo "✅ linger disabled for ${#SUBU_USERS[@]} users" diff --git a/developer/mount/logout_subu.sh b/developer/mount/logout_subu.sh new file mode 100755 index 0000000..9e95cb2 --- /dev/null +++ b/developer/mount/logout_subu.sh @@ -0,0 +1,122 @@ 
+#!/usr/bin/env bash +# logout_subu — cleanly stop subu users, tear down bindfs, unbind /home, unmount device, close LUKS +# Usage: +# sudo logout_subu --masu Thomas --device Eagle [--aggressive] [--dry-run] +# +# Notes: +# - Run from a directory NOT under /home/ (we'll auto 'cd /' if needed). +# - --aggressive enables pkill -KILL fallback if user@ sessions don't exit. +# - --device is the mapname mounted at /mnt/ and /dev/mapper/-crypt. + +set -euo pipefail + +MASU="" +DEVICE="" +AGGR=0 +DRY=0 + +while [[ $# -gt 0 ]]; do + case "$1" in + --masu) MASU="${2:-}"; shift 2;; + --device) DEVICE="${2:-}"; shift 2;; + --aggressive) AGGR=1; shift;; + --dry-run) DRY=1; shift;; + -h|--help) + grep -E '^(# |#-)' "$0" | sed 's/^# \{0,1\}//' + exit 0;; + *) echo "unknown arg: $1" >&2; exit 2;; + esac +done + +if [[ -z "$MASU" ]]; then + # best guess: current sudo user or login user + MASU="${SUDO_USER:-${USER:-}}" + [[ -n "$MASU" ]] || { echo "Set --masu "; exit 2; } +fi + +if [[ $EUID -ne 0 ]]; then + echo "❌ must run as root (sudo)"; exit 1 +fi + +# If we’re under /home/, move away so unmount can succeed +if [[ "$(pwd -P)" == /home/${MASU}* ]]; then + echo "cd / (leaving $(pwd -P) so unmounts can proceed)" + [[ $DRY -eq 1 ]] || cd / +fi + +say() { printf '%s\n' "$*"; } +doit() { echo "+ $*"; [[ $DRY -eq 1 ]] || eval "$@"; } + +# --- enumerate subu users and mountpoints +SUBU_ROOT="/home/${MASU}/subu" +SUBU_DATA="/home/${MASU}/subu_data" + +# Users of the form MASU-something that actually exist +mapfile -t SUBU_USERS < <(getent passwd | awk -F: -v pfx="^${MASU}-" '$1 ~ pfx {print $1}' | sort) + +# Bindfs targets (reverse depth for unmount) +mapfile -t SUBU_MPS < <(findmnt -Rn -S fuse.* -T "$SUBU_ROOT" -o TARGET 2>/dev/null | \ + awk -F/ '{print NF, $0}' | sort -rn | cut -d" " -f2-) + +say "== stop subu systemd user managers ==" +for u in "${SUBU_USERS[@]}"; do + say "terminating user@ for $u" + doit loginctl terminate-user "$u" || true +done + +# wait a moment and optionally 
KILL leftovers +sleep 0.5 +for u in "${SUBU_USERS[@]}"; do + if loginctl list-users --no-legend | awk '{print $2}' | grep -qx "$u"; then + if [[ $AGGR -eq 1 ]]; then + uid="$(id -u "$u" 2>/dev/null || echo "")" + if [[ -n "$uid" ]]; then + say "aggressive kill of UID $uid ($u)" + doit pkill -KILL -u "$uid" || true + fi + else + say "⚠︎ $u still has a user@ manager; rerun with --aggressive to force-kill" + fi + fi +done + +say "== unmount bindfs subu mounts under $SUBU_ROOT ==" +for mp in "${SUBU_MPS[@]}"; do + say "umount $mp" + if [[ $DRY -eq 1 ]]; then + echo "+ umount '$mp'" + else + if ! umount "$mp" 2>/dev/null; then + echo " (busy) trying lazy umount" + umount -l "$mp" || true + fi + fi +done + +# Unmount the MASU home if it is a bind of /mnt//user_data/ +say "== unmount MASU home bind (if any) ==" +if findmnt -n -T "/home/${MASU}" >/dev/null 2>&1; then + src="$(findmnt -no SOURCE -T "/home/${MASU}")" + say "/home/${MASU} source: ${src}" + say "umount /home/${MASU}" + doit umount "/home/${MASU}" || true +fi + +# If a device mapname was provided, unmount and close it +if [[ -n "$DEVICE" ]]; then + say "== unmount /mnt/${DEVICE} and close LUKS ==" + if findmnt -n "/mnt/${DEVICE}" >/dev/null 2>&1; then + say "umount /mnt/${DEVICE}" + doit umount "/mnt/${DEVICE}" || true + fi + if cryptsetup status "${DEVICE}-crypt" >/dev/null 2>&1; then + say "cryptsetup close ${DEVICE}-crypt" + doit cryptsetup close "${DEVICE}-crypt" || true + else + say "crypt mapping ${DEVICE}-crypt not active" + fi +fi + +say "sync disks" +[[ $DRY -eq 1 ]] || sync +say "✅ done" diff --git a/developer/mount/mapname__mounted_masu_list.sh b/developer/mount/mapname__mounted_masu_list.sh new file mode 100755 index 0000000..b955b07 --- /dev/null +++ b/developer/mount/mapname__mounted_masu_list.sh @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +# Function to list users in the /mnt//user_data directory +device_user_list() { + local device=$1 + local user_data_dir="/mnt/$device/user_data" + + if [ ! 
-d "$user_data_dir" ]; then + echo "Error: $user_data_dir does not exist!" + return 1 + fi + + # List all user directories in the user_data directory + find "$user_data_dir" -maxdepth 1 -mindepth 1 -type d -exec basename {} \; +} + +# Run the function with the device name as an argument +device_user_list "$1" diff --git a/developer/mount/mapname_masu__to_home.sh b/developer/mount/mapname_masu__to_home.sh new file mode 100755 index 0000000..b776626 --- /dev/null +++ b/developer/mount/mapname_masu__to_home.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash + +# Function to bind mount a user's data to /home/ +device_user_bind() { + local device=$1 + local user=$2 + local user_data_dir="/mnt/$device/user_data/$user" + local home_dir="/home/$user" + + if [ ! -d "$user_data_dir" ]; then + echo "Error: $user_data_dir does not exist!" + return 1 + fi + + # Create the home directory if it doesn't exist + mkdir -p "$home_dir" + + # Mount --bind the user data to the home directory + sudo mount --bind "$user_data_dir" "$home_dir" + echo "Mounted $user_data_dir -> $home_dir" +} + +# Run the function with the device name and user as arguments +device_user_bind "$1" "$2" diff --git a/developer/mount/masu__map_own_all.sh b/developer/mount/masu__map_own_all.sh new file mode 100755 index 0000000..0cdaa5b --- /dev/null +++ b/developer/mount/masu__map_own_all.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# usage: sudo ./masu__map_own_all.sh [--suid=US,x6] +set -euo pipefail +masu="${1:?usage: $0 [--suid=a,b]}" +suid_list="${2-}" + +want_suid_for() { + [[ "$suid_list" =~ ^--suid= ]] || return 1 + IFS=',' read -r -a arr <<< "${suid_list#--suid=}" + for n in "${arr[@]}"; do [[ "$n" == "$1" ]] && return 0; done + return 1 +} + +subus="$(./masu__subu_data_dir_list.sh "$masu")" +[[ -n "$subus" ]] || { echo "No sub-users found for $masu"; exit 1; } + +while IFS= read -r s; do + [[ -n "$s" ]] || continue + echo "Opening sub-user: $s" + if want_suid_for "$s"; then + sudo ./masu_subu__map_own.sh "$masu" "$s" 
--suid + else + sudo ./masu_subu__map_own.sh "$masu" "$s" + fi +done <<< "$subus" diff --git a/developer/mount/masu__subu_data_dir_list.sh b/developer/mount/masu__subu_data_dir_list.sh new file mode 100755 index 0000000..de4f9a0 --- /dev/null +++ b/developer/mount/masu__subu_data_dir_list.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# Function to list sub-users in /home//subu_data +subu_home_dir_list() { + local user=$1 + local subu_home_dir="/home/$user/subu_data" + + if [ ! -d "/home/$user" ]; then + echo "Error: /home/$user does not exist!" + return 1 + fi + + if [ ! -d "$subu_home_dir" ]; then + echo "Error: $subu_home_dir does not exist!" + return 1 + fi + + # List all sub-users in the subu directory + find "$subu_home_dir" -maxdepth 1 -mindepth 1 -type d -exec basename {} \; +} + +# Run the function with the user as an argument +subu_home_dir_list "$1" diff --git a/developer/mount/masu__subu_dir_list.sh b/developer/mount/masu__subu_dir_list.sh new file mode 100755 index 0000000..383ea57 --- /dev/null +++ b/developer/mount/masu__subu_dir_list.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# masu__subu_dir_list.sh + +set -euo pipefail +user="${1:?usage: $0 }" + +# Prefer the /home//subu view; if empty/nonexistent, fall back to subu_data. 
+list_from_dir() { local d="$1"; [[ -d "$d" ]] && find "$d" -mindepth 1 -maxdepth 1 -type d -printf '%f\n' || true; } + +candidates="$( + list_from_dir "/home/$user/subu" + [[ -d "/home/$user/subu" && -n "$(ls -A /home/$user/subu 2>/dev/null || true)" ]] || list_from_dir "/home/$user/subu_data" +)" + +# Unique, stable order +printf '%s\n' "$candidates" | LC_ALL=C sort -u diff --git a/developer/mount/masu_subu__map_own.sh b/developer/mount/masu_subu__map_own.sh new file mode 100755 index 0000000..358f7fe --- /dev/null +++ b/developer/mount/masu_subu__map_own.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# usage: sudo ./masu_subu__map_own.sh [--suid] +set -euo pipefail + +masu="${1:?usage: $0 [--suid]}" +subu="${2:?usage: $0 [--suid]}" +want_suid=0; [[ "${3-}" == "--suid" ]] && want_suid=1 + +need(){ command -v "$1" >/dev/null 2>&1 || { echo "missing: $1" >&2; exit 1; }; } +need bindfs; need findmnt; need umount + +src="/home/$masu/subu_data/$subu" +mp="/home/$masu/subu/$subu" +[[ -d "$src" ]] || { echo "❌ source not found: $src" >&2; exit 1; } +mkdir -p "$mp" + +# mount options +base_opts="allow_other,default_permissions,exec" +opts="$base_opts,$([[ $want_suid -eq 1 ]] && echo suid || echo nosuid)" + +# fully unstack any prior bindfs at the target +while findmnt -rn -T "$mp" -t fuse.bindfs >/dev/null 2>&1; do + umount "$mp" 2>/dev/null || umount -l "$mp" || break + sleep 0.1 +done + +echo "mounting $src -> $mp (opts: $opts)" +bindfs -o "$opts" --map="${masu}-${subu}/${masu}:@${masu}-${subu}/@${masu}" "$src" "$mp" + +# verify (single line, kernel-only) +findmnt -rn -T "$mp" -S "$src" -o TARGET,SOURCE,FSTYPE,OPTIONS | head -n1 +echo "OK" +if [[ $want_suid -eq 1 ]]; then + echo "note: suid enabled at $mp" +else + echo "note: nosuid (default) — setuid will NOT take effect at $mp" +fi diff --git a/developer/mount/masu_subu__map_own_orig.sh b/developer/mount/masu_subu__map_own_orig.sh new file mode 100644 index 0000000..2a1b1f5 --- /dev/null +++ 
b/developer/mount/masu_subu__map_own_orig.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +# Function to bind mount with UID/GID mapping +subu_bind() { + local user=$1 + local subu=$2 + + # Check if bindfs is installed + if ! command -v bindfs &> /dev/null; then + echo "Error: bindfs is not installed!" + return 1 + fi + + # Get the username and group name for the main user + master_user_name=$user + master_group=$user + + # Get the username and group name for the sub-user + subu_user_name="${user}-${subu}" + subu_group="${user}-${subu}" + + # Check if the user and sub-user exist + if ! id "$master_user_name" &>/dev/null; then + echo "Error: User '$master_user_name' not found!" + return 1 + fi + if ! id "$subu_user_name" &>/dev/null; then + echo "Error: Sub-user '${master_user_name}-${subu}' not found!" + return 1 + fi + + # Directories to be bind-mounted + subu_data_path="/home/$user/subu_data/$subu" + subu_mount_point_path="/home/$user/subu/$subu" + + # Check if sub-user directory exists + if [ ! -d "$subu_data_path" ]; then + echo "Error: Sub-user directory '$subu_data_path' does not exist!" + return 1 + fi + + # Create the mount point if it doesn't exist + mkdir -p "$subu_mount_point_path" + + # Perform the bind mount using bindfs with UID/GID mapping + sudo bindfs\ + --map="$subu_user_name/$master_user_name:@$subu_group/@$master_group" \ + "$subu_data_path" \ + "$subu_mount_point_path" + + # Verify if the mount was successful + if [ $? -eq 0 ]; then + echo "Successfully bind-mounted $subu_data_path to $subu_mount_point_path with UID/GID mapping." + else + echo "Error: Failed to bind-mount $subu_data_path to $subu_mount_point_path, might already exist." 
+ fi +} + +# Call the function with user and subu as arguments +subu_bind "$1" "$2" diff --git a/developer/mount/masu_subu__uid.sh b/developer/mount/masu_subu__uid.sh new file mode 100755 index 0000000..d3a976e --- /dev/null +++ b/developer/mount/masu_subu__uid.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash + +# Function to lookup the UID of the user and sub-user combination +get_subu_uid() { + local user=$1 + local subu=$2 + + # Concatenate user and sub-user name (no space around the = sign) + local subu_user="${user}-${subu}" + + # Lookup the UID for the sub-user (user-subuser) combination + subu_uid=$(id -u "$subu_user" 2>/dev/null) + + # If found, return only the UID, otherwise return nothing + if [ -n "$subu_uid" ]; then + echo "$subu_uid" + fi +} + +# Call the function with user and subu as arguments +get_subu_uid "$1" "$2" diff --git a/developer/mount/mount_pt_list.sh b/developer/mount/mount_pt_list.sh new file mode 100755 index 0000000..588958d --- /dev/null +++ b/developer/mount/mount_pt_list.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +# Function to list available devices under /mnt, excluding /mnt itself +mount_pt_list() { + # List all directories in /mnt that are potentially available for mounting + find /mnt -mindepth 1 -maxdepth 1 -type d -exec basename {} \; +} + +# Call the function to display available devices +mount_pt_list diff --git a/developer/source/DNS/.gitignore b/developer/source/DNS/.gitignore deleted file mode 100644 index 181003e..0000000 --- a/developer/source/DNS/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ - -__pycache__ -stage/ -deprecated/ -scratchpad/ - diff --git a/developer/source/DNS/10-block-IPv6.nft b/developer/source/DNS/10-block-IPv6.nft deleted file mode 100644 index 2cd6a2b..0000000 --- a/developer/source/DNS/10-block-IPv6.nft +++ /dev/null @@ -1,27 +0,0 @@ -# write /etc/nftables.d/10-block-IPv6.nft — drop all IPv6 -def configure(prov, planner, WriteFileMeta): - wfm = WriteFileMeta( - dpath="/etc/nftables.d", - 
fname="10-block-IPv6.nft", - owner="root", - mode=0o644, - ) - planner.displace(wfm) - planner.copy(wfm, content="""\ -table inet NO-IPV6 { - chain input { - type filter hook input priority -300; policy accept; - meta nfproto ipv6 counter comment "drop all IPv6 inbound" drop; - } - - chain output { - type filter hook output priority -300; policy accept; - meta nfproto ipv6 counter comment "drop all IPv6 outbound" drop; - } - - chain forward { - type filter hook forward priority -300; policy accept; - meta nfproto ipv6 counter comment "drop all IPv6 forward" drop; - } -} -""") diff --git a/developer/source/DNS/20-SUBU-ports.nft b/developer/source/DNS/20-SUBU-ports.nft deleted file mode 100644 index 6c31446..0000000 --- a/developer/source/DNS/20-SUBU-ports.nft +++ /dev/null @@ -1,47 +0,0 @@ -table inet SUBU-DNS-REDIRECT { - chain output { - type nat hook output priority -100; policy accept; - - # Redirect DNS for the subu UIDs to local Unbound listeners - meta skuid 2017 udp dport 53 redirect to :5301 - meta skuid 2018 udp dport 53 redirect to :5302 - meta skuid 2017 tcp dport 53 redirect to :5301 - meta skuid 2018 tcp dport 53 redirect to :5302 - } -} - -table inet SUBU-PORT-EGRESS { - chain output { - type filter hook output priority 0; policy accept; - - # Always allow loopback on egress - oifname "lo" accept - - # No IPv6 for subu (until you reintroduce v6) - meta skuid {2017,2018} meta nfproto ipv6 counter comment "no IPv6 for subu" drop - - ##### x6 (UID 2018) - # Block some exfil channels regardless of iface - meta skuid 2018 tcp dport {25,465,587} counter comment "block SMTP/Submission" drop - meta skuid 2018 udp dport {3478,5349,19302-19309} counter comment "block STUN/TURN" drop - meta skuid 2018 tcp dport 853 counter comment "block DoT (TCP/853)" drop - - # (Optional) allow ICMP echo out via x6 - meta skuid 2018 oifname "x6" ip protocol icmp icmp type echo-request accept - - # Enforce interface binding - meta skuid 2018 oifname "x6" accept - meta skuid 
2018 oifname != "x6" counter comment "x6 must use wg x6" drop - - ##### US (UID 2017) - meta skuid 2017 tcp dport {25,465,587} counter drop comment "block SMTP/Submission" - meta skuid 2017 udp dport {3478,5349,19302-19309} counter drop comment "block STUN/TURN" - meta skuid 2017 tcp dport 853 counter drop comment "block DoT (TCP/853)" - - # (Optional) ICMP via US - meta skuid 2017 oifname "US" ip protocol icmp icmp type echo-request accept - - meta skuid 2017 oifname "US" accept - meta skuid 2017 oifname != "US" counter comment "US must use wg US" drop - } -} diff --git a/developer/source/DNS/Man_In_Grey_input_acceptance.py b/developer/source/DNS/Man_In_Grey_input_acceptance.py deleted file mode 100644 index 2608280..0000000 --- a/developer/source/DNS/Man_In_Grey_input_acceptance.py +++ /dev/null @@ -1,26 +0,0 @@ -# Man_In_Grey acceptance filter (default template) -# Return True to include a config file ,False to skip it. -# You receive a PlanProvenance object named `prov`. -# -# Common fields: -# prov.stage_root_dpath : Path -# prov.config_abs_fpath : Path -# prov.config_rel_fpath : Path -# prov.read_dir_dpath : Path -# prov.read_fname : str -# -# 1) Accept everything (default): -# def accept(prov): -# return True -# -# 2) Only a namespace: -# def accept(prov): -# return prov.config_rel_fpath.as_posix().startswith("dns/") -# -# 3) Exclude editor junk: -# def accept(prov): -# r = prov.config_rel_fpath.as_posix() -# return not (r.endswith("~") or r.endswith(".swp")) -# -def accept(prov): - return True diff --git a/developer/source/DNS/README.org b/developer/source/DNS/README.org deleted file mode 100644 index 615d155..0000000 --- a/developer/source/DNS/README.org +++ /dev/null @@ -1,91 +0,0 @@ -#+TITLE: DNS Bundle (Unbound + Per-subu Redirect) — RT-v2025.09.15.1 -#+AUTHOR: RT Toolkit -#+OPTIONS: toc:2 -#+STARTUP: show2levels - -* Overview -This bundle stages a *per-subu DNS* setup on the client: -- Two Unbound instances (templated via ~unbound@.service~): - - 
~unbound@US~ listens on ~127.0.0.1:5301~, resolves *over the US tunnel* (outgoing interface = ~10.0.0.1~). - - ~unbound@x6~ listens on ~127.0.0.1:5302~, resolves *over the x6 tunnel* (outgoing interface = ~10.8.0.2~). -- nftables rules match the subu’s UID and *redirect TCP/UDP port 53* to the corresponding local Unbound port. -- A small deploy helper (~deploy_DNS.py~) installs the staged tree and enables services. - -* Why this design? -- When a subu (containerized user) does DNS, traffic is forced to the tunnel assigned to that subu. -- If a tunnel is down, DNS for that subu fails closed (no silent leak), while your ~local~ subu can still use ISP DNS. -- No changes to per-user resolv.conf are required: subu keep using ~nameserver 127.0.0.1~ (via redirect). - -* Layout -#+begin_example -DNS_bundle/ - README.org - deploy_DNS.py - stage/ - etc/ - nftables.d/ - DNS-redirect.nft - systemd/ - system/ - DNS-redirect.service - unbound@.service - unbound/ - unbound-US.conf - unbound-x6.conf - usr/ - local/ - sbin/ - DNS_status.sh -#+end_example - -* Assumptions / Customize -- Client WG local addresses (from your earlier setup): - - US: ~10.0.0.1/32~ - - x6: ~10.8.0.2/32~ -- Subu UIDs (adjust if different): - - US → UID ~2017~ - - x6 → UID ~2018~ -- If these differ on your box, edit: - - ~stage/etc/unbound/unbound-US.conf~ (~outgoing-interface~) - - ~stage/etc/unbound/unbound-x6.conf~ (~outgoing-interface~) - - ~stage/etc/nftables.d/DNS-redirect.nft~ (the ~meta skuid~ lines) - -* Deploy -1. Review staged files: - #+begin_src sh - tar tzf DNS_bundle.tgz | sed 's/^/ /' - #+end_src -2. Extract and run deploy (root): - #+begin_src sh - tar xzf DNS_bundle.tgz - cd DNS_bundle - sudo ./deploy_DNS.py --instances US x6 - #+end_src -3. 
Verify: - #+begin_src sh - systemctl status unbound@US unbound@x6 DNS-redirect - sudo nft list table inet NAT-DNS-REDIRECT - #+end_src - -* How it works -- nftables (~DNS-redirect.nft~) in ~inet~ *nat output* hook rewrites subu DNS to the local listener ports: - - US (UID 2017) → ~127.0.0.1:5301~ - - x6 (UID 2018) → ~127.0.0.1:5302~ -- Each Unbound instance binds to its port and *sources queries from the WG IP* using ~outgoing-interface~. -- Unit ordering ties each instance to its tunnel: ~After=~ and ~Requires=~ ~wg-quick@%i.service~. - -* Notes -- If a tunnel’s address is not present at Unbound start, the unit waits because of the dependency and restarts later. -- For DoT/DoH upstream, you can switch to ~forward-tls-upstream: yes~ with providers that support TLS on 853. -- The ~DNS_status.sh~ helper prints a quick status and the top of logs. - -* Rollback -#+begin_src sh -sudo systemctl disable --now unbound@US unbound@x6 DNS-redirect -sudo nft flush table inet NAT-DNS-REDIRECT || true -# Remove staged files if desired (be careful) -# sudo rm -f /etc/unbound/unbound-US.conf /etc/unbound/unbound-x6.conf -#+end_src - -* License -This bundle is provided “as-is”. Use at your own discretion. diff --git a/developer/source/DNS/doc_howto_install.org b/developer/source/DNS/doc_howto_install.org deleted file mode 100644 index 68476a3..0000000 --- a/developer/source/DNS/doc_howto_install.org +++ /dev/null @@ -1,32 +0,0 @@ - -* 1. modify stage files - - The stage/ directory holds bespoke configuration files for host StanleyPark's configuration. - - Copy/Modify the sraged files for your site. - - Work on the stage is done in user space. The program `sudo install_staged_tree.py` copies the files on the stage into the root file system, `/', or optionally to another specified directory target. However, normally one will run `deploy.py` to do the install and to make the systemctl calls to restart services. - -* 2. 
edit /etc/nftables.conf - - requires root priv - - Strange, but Debian 12 nftables does not automatically include the scripts in its drop-in directory, so .. - - add this at the bottom of /etc/nftables.conf - - flush ruleset - include "/etc/nftables.d/*.nft" - -* 3. run `deploy.py` - - requires root priv - -* 4. check - - requires root priv - - nft list ruleset | sed -n '/SUBU-/,/}/p' - systemctl status nftables - ss -ltnup 'sport = :5301' 'sport = :5302' # your Unbound listeners - diff --git a/developer/source/DNS/stage-US/10-block-IPv6.nft.py b/developer/source/DNS/stage-US/10-block-IPv6.nft.py deleted file mode 100644 index 2cd6a2b..0000000 --- a/developer/source/DNS/stage-US/10-block-IPv6.nft.py +++ /dev/null @@ -1,27 +0,0 @@ -# write /etc/nftables.d/10-block-IPv6.nft — drop all IPv6 -def configure(prov, planner, WriteFileMeta): - wfm = WriteFileMeta( - dpath="/etc/nftables.d", - fname="10-block-IPv6.nft", - owner="root", - mode=0o644, - ) - planner.displace(wfm) - planner.copy(wfm, content="""\ -table inet NO-IPV6 { - chain input { - type filter hook input priority -300; policy accept; - meta nfproto ipv6 counter comment "drop all IPv6 inbound" drop; - } - - chain output { - type filter hook output priority -300; policy accept; - meta nfproto ipv6 counter comment "drop all IPv6 outbound" drop; - } - - chain forward { - type filter hook forward priority -300; policy accept; - meta nfproto ipv6 counter comment "drop all IPv6 forward" drop; - } -} -""") diff --git a/developer/source/DNS/stage-US/20-SUBU-ports-US.nft.py b/developer/source/DNS/stage-US/20-SUBU-ports-US.nft.py deleted file mode 100644 index c9c8f5e..0000000 --- a/developer/source/DNS/stage-US/20-SUBU-ports-US.nft.py +++ /dev/null @@ -1,51 +0,0 @@ -# write /etc/nftables.d/20-SUBU-ports.nft — DNS redirect + strict egress -def configure(prov, planner, WriteFileMeta): - wfm = WriteFileMeta( - dpath="/etc/nftables.d", - fname="20-SUBU-ports.nft", - owner="root", - mode=0o644, - ) - planner.displace(wfm) 
- planner.copy(wfm, content="""\ -# DNS per-UID redirect to local Unbound -table inet SUBU-DNS-REDIRECT { - chain output { - type nat hook output priority -100; policy accept; - - # US (uid 2017) -> 127.0.0.1:5301 - meta skuid 2017 udp dport 53 redirect to :5301 - meta skuid 2017 tcp dport 53 redirect to :5301 - # x6 (uid 2018) -> 127.0.0.1:5302 - meta skuid 2018 udp dport 53 redirect to :5302 - meta skuid 2018 tcp dport 53 redirect to :5302 - } -} - -# Egress policy: subu UIDs must use their WireGuard iface; block exfil channels -table inet SUBU-PORT-EGRESS { - chain output { - type filter hook output priority 0; policy accept; - - # Always allow loopback - oifname "lo" accept; - - # No IPv6 for subu (until you reintroduce v6) - meta skuid {2017,2018} meta nfproto ipv6 counter comment "no IPv6 for subu" drop; - - ##### x6 (UID 2018) - meta skuid 2018 tcp dport {25,465,587} counter comment "block SMTP/Submission" drop; - meta skuid 2018 udp dport {3478,5349,19302-19309} counter comment "block STUN/TURN" drop; - meta skuid 2018 tcp dport 853 counter comment "block DoT (TCP/853)" drop; - meta skuid 2018 oifname "x6" accept; - meta skuid 2018 oifname != "x6" counter comment "x6 must use wg x6" drop; - - ##### US (UID 2017) - meta skuid 2017 tcp dport {25,465,587} counter comment "block SMTP/Submission" drop; - meta skuid 2017 udp dport {3478,5349,19302-19309} counter comment "block STUN/TURN" drop; - meta skuid 2017 tcp dport 853 counter comment "block DoT (TCP/853)" drop; - meta skuid 2017 oifname "US" accept; - meta skuid 2017 oifname != "US" counter comment "US must use wg US" drop; - } -} -""") diff --git a/developer/source/DNS/stage-US/30-unbound-US.conf.py b/developer/source/DNS/stage-US/30-unbound-US.conf.py deleted file mode 100644 index 697c38e..0000000 --- a/developer/source/DNS/stage-US/30-unbound-US.conf.py +++ /dev/null @@ -1,42 +0,0 @@ -# write /etc/unbound/unbound-US.conf — local listener that egresses via US WG -def configure(prov, planner, 
WriteFileMeta): - wfm = WriteFileMeta( - dpath="/etc/unbound", - fname="unbound-US.conf", - owner="root", - mode=0o644, - ) - planner.displace(wfm) - planner.copy(wfm, content="""\ -server: - verbosity: 1 - username: "unbound" - directory: "/etc/unbound" - chroot: "" - - do-ip6: no - do-udp: yes - do-tcp: yes - prefer-ip6: no - - # Listen only on loopback (US instance) - interface: 127.0.0.1@5301 - access-control: 127.0.0.0/8 allow - - # Egress via US tunnel address (policy rules ensure it leaves on wg US) - outgoing-interface: 10.0.0.1 - - # Hardening/cache - hide-identity: yes - hide-version: yes - harden-referral-path: yes - harden-dnssec-stripped: yes - qname-minimisation: yes - aggressive-nsec: yes - prefetch: yes - cache-min-ttl: 60 - cache-max-ttl: 86400 - - # DNSSEC trust anchor - auto-trust-anchor-file: "/var/lib/unbound/root.key" -""") diff --git a/developer/source/DNS/stage-US/50-wg-policy-US.service.py b/developer/source/DNS/stage-US/50-wg-policy-US.service.py deleted file mode 100644 index dc0b153..0000000 --- a/developer/source/DNS/stage-US/50-wg-policy-US.service.py +++ /dev/null @@ -1,18 +0,0 @@ -# /etc/systemd/system/wg-policy-US.service — run after wg-quick@US to install policy rules -def configure(prov, planner, WriteFileMeta): - content = """[Unit] -Description=Policy routing for Unbound egress (US) -After=wg-quick@US.service -Wants=wg-quick@US.service - -[Service] -Type=oneshot -ExecStart=/usr/local/sbin/wg-policy-US.sh -RemainAfterExit=yes - -[Install] -WantedBy=multi-user.target -""" - wfm = WriteFileMeta(dpath="/etc/systemd/system", fname="wg-policy-US.service", owner="root", mode="0644") - planner.displace(wfm) - planner.copy(wfm, content=content) diff --git a/developer/source/DNS/stage-US/51-wg-policy-US.sh.py b/developer/source/DNS/stage-US/51-wg-policy-US.sh.py deleted file mode 100644 index 63c2dd7..0000000 --- a/developer/source/DNS/stage-US/51-wg-policy-US.sh.py +++ /dev/null @@ -1,19 +0,0 @@ -# /usr/local/sbin/wg-policy-US.sh — 
source-policy routing for Unbound's egress -def configure(prov, planner, WriteFileMeta): - # EDIT if your interface/IP differ: - WG_IFACE = "US" - WG_SRC_IP = "10.0.0.1" - TABLE = 100 - - content = f"""#!/usr/bin/env bash -set -euo pipefail -WG_IFACE="{WG_IFACE}" -WG_SRC_IP="{WG_SRC_IP}" -TABLE={TABLE} - -ip rule replace from "$WG_SRC_IP" lookup "$TABLE" priority 10010 -ip route replace default dev "$WG_IFACE" table "$TABLE" -""" - wfm = WriteFileMeta(dpath="/usr/local/sbin", fname="wg-policy-US.sh", owner="root", mode="0755") - planner.displace(wfm) - planner.copy(wfm, content=content) diff --git a/developer/source/DNS/stage_0/30-dnsredir.nft b/developer/source/DNS/stage_0/30-dnsredir.nft deleted file mode 100644 index 8ab5249..0000000 --- a/developer/source/DNS/stage_0/30-dnsredir.nft +++ /dev/null @@ -1,14 +0,0 @@ -# Redirect DNS traffic per-UID to local Unbound instances. -# US (uid 2017) -> 127.0.0.1:5301 -# x6 (uid 2018) -> 127.0.0.1:5302 -table inet nat { - chain output { - type nat hook output priority -100; - # US - meta skuid 2017 udp dport 53 redirect to :5301 - meta skuid 2017 tcp dport 53 redirect to :5301 - # x6 - meta skuid 2018 udp dport 53 redirect to :5302 - meta skuid 2018 tcp dport 53 redirect to :5302 - } -} diff --git a/developer/source/DNS/stage_0/unbound-US.conf b/developer/source/DNS/stage_0/unbound-US.conf deleted file mode 100644 index 6a799f7..0000000 --- a/developer/source/DNS/stage_0/unbound-US.conf +++ /dev/null @@ -1,40 +0,0 @@ -server: - verbosity: 1 - username: "unbound" - directory: "/etc/unbound" - chroot: "" - - do-ip6: no - do-udp: yes - do-tcp: yes - prefer-ip6: no - - # Listen only on loopback (US instance) - interface: 127.0.0.1@5301 - access-control: 127.0.0.0/8 allow - - # Egress via US tunnel address (policy routing will carry it out the WG table) - outgoing-interface: 10.0.0.1 - - # Sensible hardening/cache - hide-identity: yes - hide-version: yes - harden-referral-path: yes - harden-dnssec-stripped: yes - 
qname-minimisation: yes - aggressive-nsec: yes - prefetch: yes - cache-min-ttl: 60 - cache-max-ttl: 86400 - - # DNSSEC TA (create with unbound-anchor) - auto-trust-anchor-file: "/var/lib/unbound/root.key" - # Optional root hints (download separately) - # root-hints: "/var/lib/unbound/root.hints" - -# To use forwarding instead of full recursion, uncomment and edit: -# forward-zone: -# name: "." -# forward-tls-upstream: no -# forward-addr: 9.9.9.9 -# forward-addr: 1.1.1.1 diff --git a/developer/source/DNS/stage_0/unbound-x6.conf b/developer/source/DNS/stage_0/unbound-x6.conf deleted file mode 100644 index c34a068..0000000 --- a/developer/source/DNS/stage_0/unbound-x6.conf +++ /dev/null @@ -1,40 +0,0 @@ -server: - verbosity: 1 - username: "unbound" - directory: "/etc/unbound" - chroot: "" - - do-ip6: no - do-udp: yes - do-tcp: yes - prefer-ip6: no - - # Listen only on loopback (x6 instance) - interface: 127.0.0.1@5302 - access-control: 127.0.0.0/8 allow - - # Egress via x6 tunnel address (policy routing will carry it out the WG table) - outgoing-interface: 10.8.0.2 - - # Sensible hardening/cache - hide-identity: yes - hide-version: yes - harden-referral-path: yes - harden-dnssec-stripped: yes - qname-minimisation: yes - aggressive-nsec: yes - prefetch: yes - cache-min-ttl: 60 - cache-max-ttl: 86400 - - # DNSSEC TA (create with unbound-anchor) - auto-trust-anchor-file: "/var/lib/unbound/root.key" - # Optional root hints (download separately) - # root-hints: "/var/lib/unbound/root.hints" - -# To use forwarding instead of full recursion, uncomment and edit: -# forward-zone: -# name: "." 
-# forward-tls-upstream: no -# forward-addr: 9.9.9.9 -# forward-addr: 1.1.1.1 diff --git a/developer/source/DNS/stage_0/unbound@.service b/developer/source/DNS/stage_0/unbound@.service deleted file mode 100644 index 4fa31d8..0000000 --- a/developer/source/DNS/stage_0/unbound@.service +++ /dev/null @@ -1,20 +0,0 @@ -[Unit] -Description=Unbound DNS (%i) -Documentation=man:unbound(8) -After=network-online.target wg-quick@%i.service -Wants=network-online.target - -[Service] -Type=simple -ExecStart=/usr/sbin/unbound -d -p -c /etc/unbound/unbound-%i.conf -Restart=on-failure -# Lock down a bit -CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETGID CAP_SETUID -AmbientCapabilities=CAP_NET_BIND_SERVICE -NoNewPrivileges=true -PrivateTmp=true -ProtectSystem=full -ProtectHome=true - -[Install] -WantedBy=multi-user.target diff --git a/developer/source/DNS/unbound_US.py b/developer/source/DNS/unbound_US.py deleted file mode 100644 index 4c3f8ba..0000000 --- a/developer/source/DNS/unbound_US.py +++ /dev/null @@ -1,33 +0,0 @@ -# unbound/unbound_US.py -def configure(prov, planner, WriteFileMeta): - conf = """server: - verbosity: 1 - username: "unbound" - directory: "/etc/unbound" - chroot: "" - - do-ip6: no - do-udp: yes - do-tcp: yes - prefer-ip6: no - - interface: 127.0.0.1@5301 - access-control: 127.0.0.0/8 allow - - outgoing-interface: 10.0.0.1 - - hide-identity: yes - hide-version: yes - harden-referral-path: yes - harden-dnssec-stripped: yes - qname-minimisation: yes - aggressive-nsec: yes - prefetch: yes - cache-min-ttl: 60 - cache-max-ttl: 86400 - - auto-trust-anchor-file: "/var/lib/unbound/root.key" -""" - wfm = WriteFileMeta(dpath="/etc/unbound", fname="unbound-US.conf", - owner="root", mode="0644") - planner.copy(wfm, content=conf) diff --git a/developer/source/DNS/unbound_at_template.py b/developer/source/DNS/unbound_at_template.py deleted file mode 100644 index 57326d2..0000000 --- a/developer/source/DNS/unbound_at_template.py +++ /dev/null @@ -1,25 +0,0 @@ -# 
systemd/unbound_at_template.py -def configure(prov, planner, WriteFileMeta): - service = """[Unit] -Description=Unbound DNS (%i) -Documentation=man:unbound(8) -After=network-online.target wg-quick@%i.service -Wants=network-online.target - -[Service] -Type=simple -ExecStart=/usr/sbin/unbound -d -p -c /etc/unbound/unbound-%i.conf -Restart=on-failure -CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETGID CAP_SETUID -AmbientCapabilities=CAP_NET_BIND_SERVICE -NoNewPrivileges=true -PrivateTmp=true -ProtectSystem=full -ProtectHome=true - -[Install] -WantedBy=multi-user.target -""" - wfm = WriteFileMeta(dpath="/etc/systemd/system", fname="unbound@.service", - owner="root", mode="0644") - planner.copy(wfm, content=service) diff --git a/developer/source/DNS/unbound_x6.py b/developer/source/DNS/unbound_x6.py deleted file mode 100644 index 979e05b..0000000 --- a/developer/source/DNS/unbound_x6.py +++ /dev/null @@ -1,33 +0,0 @@ -# unbound/unbound_x6.py -def configure(prov, planner, WriteFileMeta): - conf = """server: - verbosity: 1 - username: "unbound" - directory: "/etc/unbound" - chroot: "" - - do-ip6: no - do-udp: yes - do-tcp: yes - prefer-ip6: no - - interface: 127.0.0.1@5302 - access-control: 127.0.0.0/8 allow - - outgoing-interface: 10.8.0.2 - - hide-identity: yes - hide-version: yes - harden-referral-path: yes - harden-dnssec-stripped: yes - qname-minimisation: yes - aggressive-nsec: yes - prefetch: yes - cache-min-ttl: 60 - cache-max-ttl: 86400 - - auto-trust-anchor-file: "/var/lib/unbound/root.key" -""" - wfm = WriteFileMeta(dpath="/etc/unbound", fname="unbound-x6.conf", - owner="root", mode="0644") - planner.copy(wfm, content=conf) diff --git a/developer/source/cc/Db.lib.c b/developer/source/cc/Db.lib.c deleted file mode 100644 index 44c9b3f..0000000 --- a/developer/source/cc/Db.lib.c +++ /dev/null @@ -1,200 +0,0 @@ -#ifndef IFACE -#define Db·IMPLEMENTATION -#define IFACE -#endif - -#ifndef Db·IFACE -#define Db·IFACE - - #include - #include - - // Enum for exit 
codes - typedef enum { - Db·EXIT_SUCCESS = 0, - Db·EXIT_DB_OPEN_ERROR, - Db·EXIT_SCHEMA_LOAD_ERROR, - Db·EXIT_MEMORY_ALLOCATION_ERROR, - Db·EXIT_STATEMENT_PREPARE_ERROR, - Db·EXIT_STATEMENT_EXECUTE_ERROR - } Db·ExitCode; - - // Interface prototypes - sqlite3* Db·open(const char *db_path ,bool create_if_not_exists); - Db·ExitCode Db·load_schema(sqlite3 *db, const char *schema_path); - Db·ExitCode Db·log_event(sqlite3 *db, int event_id, int user_id); - int Db·query( - sqlite3 *db - ,const char *sql - ,int (*callback)(void * ,int ,char ** ,char **) - ,void *callback_arg - ); - void Db·close(sqlite3 *db); - -#endif // Db·IFACE - -#ifndef Db·IMPLEMENTATION - - #include - #include - #include - #include - #include - #include - #include - - sqlite3* Db·open(const char *db_path ,bool create_if_not_exists){ - sqlite3 *db; - FILE *file_check = fopen(db_path ,"r"); - - if(!file_check && create_if_not_exists){ - file_check = fopen(db_path ,"w"); - if(!file_check){ - fprintf( - stderr, - "Db::open failed to create database file '%s': %s\n", - db_path, - strerror(errno) - ); - return NULL; - } - fclose(file_check); - printf("Db::open created new database file '%s'\n", db_path); - }else if(!file_check){ - fprintf(stderr ,"Db::open database file '%s' not found and create flag not set\n" ,db_path); - return NULL; - }else{ - fclose(file_check); - } - - if( sqlite3_open(db_path ,&db) != SQLITE_OK ){ - fprintf( - stderr, - "Db::open failed to open database '%s': %s\n", - db_path, - sqlite3_errmsg(db) - ); - return NULL; - } - - printf("Db::open database '%s' opened successfully\n", db_path); - return db; - } - - // Load schema from a file - Db·ExitCode Db·load_schema(sqlite3 *db ,const char *schema_path){ - FILE *file = fopen(schema_path, "r"); - if(!file){ - fprintf - ( - stderr - ,"Db::load_schema failed to open schema file '%s'\n" - ,schema_path - ); - return Db·EXIT_SCHEMA_LOAD_ERROR; - } - - fseek(file, 0, SEEK_END); - long file_size = ftell(file); - rewind(file); - - char *schema 
= malloc(file_size + 1); - if(!schema){ - fprintf(stderr, "Db::load_schema memory allocation failed\n"); - fclose(file); - return Db·EXIT_MEMORY_ALLOCATION_ERROR; - } - - fread(schema, 1, file_size, file); - schema[file_size] = '\0'; - fclose(file); - - char *err_msg = NULL; - if( sqlite3_exec(db, schema, NULL, NULL, &err_msg) != SQLITE_OK ){ - fprintf - ( - stderr - ,"Db::load_schema failed to execute schema: %s\n" - ,err_msg - ); - sqlite3_free(err_msg); - free(schema); - return Db·EXIT_STATEMENT_EXECUTE_ERROR; - } - - printf("Db::load_schema schema initialized successfully from '%s'\n", schema_path); - free(schema); - return Db·EXIT_SUCCESS; - } - - // Log an event into the database - Db·ExitCode Db·log_event(sqlite3 *db ,int event_id ,int user_id){ - const char *sql_template = - "INSERT INTO db_event (event_time ,event_id ,user_id) " - "VALUES (CURRENT_TIMESTAMP ,? ,?);"; - sqlite3_stmt *stmt; - - if( sqlite3_prepare_v2(db ,sql_template ,-1 ,&stmt ,NULL) != SQLITE_OK ){ - fprintf - ( - stderr - ,"Db::log_event failed to prepare statement: %s\n" - ,sqlite3_errmsg(db) - ); - return Db·EXIT_STATEMENT_PREPARE_ERROR; - } - - sqlite3_bind_int(stmt, 1, event_id); - sqlite3_bind_int(stmt, 2, user_id); - - if( sqlite3_step(stmt) != SQLITE_DONE ){ - fprintf - ( - stderr - ,"Db::log_event failed to execute statement: %s\n" - ,sqlite3_errmsg(db) - ); - sqlite3_finalize(stmt); - return Db·EXIT_STATEMENT_EXECUTE_ERROR; - } - - sqlite3_finalize(stmt); - return Db·EXIT_SUCCESS; - } - - // Query Execution Function - int Db·query( - sqlite3 *db - ,const char *sql - ,int (*callback)(void * ,int ,char ** ,char **) - ,void *callback_arg - ){ - char *err_msg = NULL; - int rc = sqlite3_exec(db ,sql ,callback ,callback_arg ,&err_msg); - - if( rc != SQLITE_OK ){ - fprintf - ( - stderr - ,"Db::query SQL error: %s\nQuery: %s\n" - ,err_msg - ,sql - ); - sqlite3_free(err_msg); - return rc; - } - - return SQLITE_OK; - } - - // Close the database - void Db·close(sqlite3 *db){ - if( db ){ - 
sqlite3_close(db); - printf("Db::close database connection closed\n"); - } - } - -#endif // Db·IMPLEMENTATION - - diff --git a/developer/source/cc/DbSubu.lib.c b/developer/source/cc/DbSubu.lib.c deleted file mode 100644 index 4274dec..0000000 --- a/developer/source/cc/DbSubu.lib.c +++ /dev/null @@ -1,157 +0,0 @@ -#ifndef IFACE -#define DbSubu·IMPLEMENTATION -#define IFACE -#endif - -#ifndef DbSubu·IFACE -#define DbSubu·IFACE - - #include - - typedef struct DbSubu { - sqlite3 *db; - } DbSubu; - - - // db connection - DbSubu* DbSubu·open( const char *db_path ); - void DbSubu·close( DbSubu *db ); - int DbSubu·validate_schema( DbSubu *db ); - - // User Management - int DbSubu·add_user( DbSubu *db ,const char *name ,const char *home_directory ,int shell_id ,int parent_id ,int user_type_id ); - int DbSubu·delete_user( DbSubu *db ,int user_id ); - int DbSubu·get_user( DbSubu *db ,int user_id ,char **name ,char **home_directory ,int *shell_id ,int *parent_id ,int *user_type_id ); - - // Sharing Management - int DbSubu·add_share( DbSubu *db ,int user_id ,int other_user_id ,const char *permissions ); - int DbSubu·delete_share( DbSubu *db ,int share_id ); - - // System Resource Management - int DbSubu·grant_resource( DbSubu *db ,int user_id ,int resource_id ,int granted_by ); - int DbSubu·revoke_resource( DbSubu *db ,int user_id ,int resource_id ); - - // Event Logging - int DbSubu·log_event( DbSubu *db ,int event_id ,int user_id ); - -#endif // DbSubu·IFACE - -#ifdef DbSubu·IMPLEMENTATION - - #include - #include - #include - #include "Db.lib.c" - - // Open the database - DbSubu* DbSubu·open( const char *db_path ){ - DbSubu *db = malloc( sizeof(DbSubu) ); - if( !db ){ - fprintf( stderr ,"DbSubu·open:: failed to allocate memory for DbSubu\n" ); - return NULL; - } - db->db = Db·open(db_path ,true); - if( !db->db ){ - free( db ); - return NULL; - } - return db; - } - - // Close the database - void DbSubu·close( DbSubu *db ){ - if( db ){ - Db·close( db->db ); - free( db ); - } - 
} - - // Validate the schema - int DbSubu·validate_schema( DbSubu *db ){ - // Validation logic for ensuring the schema is correct - return 0; // Placeholder for schema validation implementation - } - - // Add a user - int DbSubu·add_user( DbSubu *db ,const char *name ,const char *home_directory ,int shell_id ,int parent_id ,int user_type_id ){ - char sql[256]; - snprintf - ( - sql - ,sizeof(sql) - ,"INSERT INTO user (name ,home_directory ,shell ,parent_id ,user_type_id) VALUES ('%s' ,'%s' ,%d ,%d ,%d);" - ,name - ,home_directory - ,shell_id - ,parent_id - ,user_type_id - ); - return Db·query( db->db ,sql ,NULL ,NULL ); - } - - // Delete a user - int DbSubu·delete_user( DbSubu *db ,int user_id ){ - char sql[128]; - snprintf( sql ,sizeof(sql) ,"DELETE FROM user WHERE id = %d;" ,user_id ); - return Db·query( db->db ,sql ,NULL ,NULL ); - } - - // Log an event - int DbSubu·log_event( DbSubu *db ,int event_id ,int user_id ){ - char sql[128]; - snprintf - ( - sql - ,sizeof(sql) - ,"INSERT INTO db_event (event_id ,user_id) VALUES (%d ,%d);" - ,event_id - ,user_id - ); - return Db·query( db->db ,sql ,NULL ,NULL ); - } - - // Add to a list (private function) - static int add_to_list( sqlite3 *db ,const char *list_name ,const char *entry_name ){ - char sql[128]; - snprintf - ( - sql - ,sizeof(sql) - ,"INSERT INTO %s (name) VALUES ('%s');" - ,list_name - ,entry_name - ); - return Db·query( db ,sql ,NULL ,NULL ); - } - - // Get list entries (private function) - static char** get_list( sqlite3 *db ,const char *list_name ,int *count ){ - char sql[128]; - snprintf( sql ,sizeof(sql) ,"SELECT name FROM %s;" ,list_name ); - - struct ListResult { - char **entries; - int count; - } result = { NULL ,0 }; - - int callback( void *arg ,int argc ,char **argv ,char **col_names ){ - (void)argc; (void)col_names; - struct ListResult *res = arg; - res->entries = realloc( res->entries ,(res->count + 1) * sizeof(char *) ); - res->entries[res->count++] = strdup( argv[0] ); - return 0; - } - - if( 
Db·query( db ,sql ,callback ,&result ) != SQLITE_OK ){ - for( int i = 0; i < result.count; ++i ){ - free( result.entries[i] ); - } - free( result.entries ); - return NULL; - } - - *count = result.count; - return result.entries; - } - -#endif // DbSubu·IMPLEMENTATION diff --git a/developer/source/cc/Db_close.cli.c b/developer/source/cc/Db_close.cli.c deleted file mode 100644 index 8575260..0000000 --- a/developer/source/cc/Db_close.cli.c +++ /dev/null @@ -1,32 +0,0 @@ -#define IFACE -#include -#include -#include -#include -#include "Db.lib.c" - -int main(int argc ,char *argv[]){ - if( argc < 2 ){ - fprintf(stderr ,"Usage: %s \n" ,argv[0]); - return EXIT_FAILURE; - } - - // Parse the SQLite handle from the command-line argument - uintptr_t handle_as_int; - if( sscanf(argv[1] ,"%lx" ,&handle_as_int) != 1 ){ - fprintf(stderr ,"%s::main failed to parse handle '%s'\n" ,argv[0] ,argv[1]); - return EXIT_FAILURE; - } - - sqlite3 *db = (sqlite3 *)handle_as_int; - - // Attempt to close the database - if( db ){ - Db·close(db); - printf("Database handle %p closed successfully.\n" ,db); - return EXIT_SUCCESS; - } else { - fprintf(stderr ,"Invalid or NULL database handle: %p\n" ,db); - return EXIT_FAILURE; - } -} diff --git a/developer/source/cc/Hello.cli.c b/developer/source/cc/Hello.cli.c deleted file mode 100644 index 2a18583..0000000 --- a/developer/source/cc/Hello.cli.c +++ /dev/null @@ -1,67 +0,0 @@ - -/* - The subu server command line interface. - - Usage: - server [-s ] [-l ] [arguments...] - - Options: - -s Specify the Unix socket file path. Default: ./socket - -l Specify the log file path. 
Default: ./log.txt -*/ - -#define IFACE -#include -#include -#include -#include -#include "Hello.lib.c" - -// Define defaults -#define DEFAULT_SOCKET_PATH "socket" - -int main( int argc ,char **argv ){ - char *socket_path = DEFAULT_SOCKET_PATH; - int error_flag = 0; - - int opt; - while( (opt = getopt(argc ,argv ,":s:l:")) != -1 ){ - switch( opt ){ - case 's': - socket_path = optarg; - break; - case '?': // Unknown option - fprintf( stderr ,"%s::main unknown option '-%c'\n" ,argv[0] ,optopt ); - error_flag = 1; - break; - case ':': // Missing argument - fprintf( stderr ,"%s::main missing argument for option '-%c'\n" ,argv[0] ,optopt ); - error_flag = 1; - break; - } - } - - if( optind > argc ){ - fprintf( stderr ,"%s::main optind(%d) > argc(%d), which indicates an option parsing bug\n" ,argv[0] ,optind ,argc ); - error_flag = 1; - } - - // Exit on error after processing all options - if( error_flag ){ - fprintf( stderr ,"%s::main usage: %s [-s ] [arguments...]\n" ,argv[0] ,argv[0] ); - return EXIT_FAILURE; - } - - // Rebase argv to prepare for run - if(optind > 0){ - argv[optind - 1] = argv[0]; // Program name at the new base - argc -= (optind - 1); - argv += (optind - 1); - } - - // Log parsed options - printf( "%s::main socket_path='%s'\n" ,argv[0] ,socket_path ); - - // Call the hello function - return Hello·run(argc ,argv ,socket_path); -} diff --git a/developer/source/cc/Hello.lib.c b/developer/source/cc/Hello.lib.c deleted file mode 100644 index 28d0f19..0000000 --- a/developer/source/cc/Hello.lib.c +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef IFACE -#define Hello·IMPLEMENTATION -#define IFACE -#endif - -#ifndef Hello·IFACE -#define Hello·IFACE - - // Necessary interface includes - // .. 
none - - // Interface prototypes - int Hello·run(int argc ,char** argv ,char *socket_path); - -#endif // Hello·IFACE - -#ifdef Hello·IMPLEMENTATION - - // Implementation-specific includes - #include - #include - #include - #include - #include - #include - #include - #include - - // Constants - #define Hello·SOCKET_PATH "/var/user_data/Thomas-developer/subu/developer/mockup/subu_server_home/subu_server.sock" - #define Hello·LOG_PATH "server_test.log" - #define Hello·BUFFER_SIZE 256 - - int Hello·run(int argc ,char** argv ,char *socket_path){ - (void)argc; // Suppress unused variable warnings - (void)argv; - - int client_fd; - struct sockaddr_un address; - char buffer[Hello·BUFFER_SIZE]; - - client_fd = socket(AF_UNIX ,SOCK_STREAM ,0); - if( client_fd == -1 ){ - perror("Hello·run:: error opening socket"); - return EXIT_FAILURE; - } - - // Configure server socket address - memset(&address ,0 ,sizeof(address)); - address.sun_family = AF_UNIX; - strncpy(address.sun_path ,socket_path ,sizeof(address.sun_path) - 1); - - // Connect to the server - if( connect(client_fd ,(struct sockaddr *)&address ,sizeof(address)) == -1 ){ - perror("Hello·run:: error connecting to server"); - close(client_fd); - return EXIT_FAILURE; - } - - // Send message to the server - char *out_buf = "hello\n"; - if( write(client_fd ,out_buf ,strlen(out_buf)) == -1 ){ - perror("Hello·run:: error writing to server"); - return EXIT_FAILURE; - } - - printf("Hello·run:: sent \"%s\"\n" ,out_buf); - - // Clean up - close(client_fd); - - return EXIT_SUCCESS; - } - -#endif // Hello·IMPLEMENTATION diff --git a/developer/source/cc/README.org b/developer/source/cc/README.org deleted file mode 100644 index 17433fb..0000000 --- a/developer/source/cc/README.org +++ /dev/null @@ -1,5 +0,0 @@ - -These are currently not used. Eventually the main subu commands will be C programs and setuid root so that master users (and only master users) can seamlessly manipulate sub users. 
- -I started on this, then decided to let the scripts stabilize first. - diff --git a/developer/source/cc/Server.cli.c b/developer/source/cc/Server.cli.c deleted file mode 100644 index 6b38b3b..0000000 --- a/developer/source/cc/Server.cli.c +++ /dev/null @@ -1,92 +0,0 @@ -/* - The subu server command line interface. - - Usage: - server [-s ] [-l ] [arguments...] - - Options: - -s Specify the Unix socket file path. Default: ./socket - -l Specify the log file path. Default: ./log.txt -*/ - -#define IFACE -#include -#include -#include -#include -#include "Server.lib.c" - -// Define defaults -#define DEFAULT_SOCKET_PATH "socket" -#define DEFAULT_LOG_PATH "log.txt" - -int main( int argc ,char **argv ){ - char *socket_path = DEFAULT_SOCKET_PATH; - char *log_path = DEFAULT_LOG_PATH; - int error_flag = 0; - - // Parse command-line options - int opt; - while( (opt = getopt(argc ,argv ,":s:l:")) != -1 ){ - switch( opt ){ - case 's': - socket_path = optarg; - break; - case 'l': - log_path = optarg; - break; - case '?': // Unknown option - fprintf( stderr ,"%s::main unknown option '-%c'\n" ,argv[0] ,optopt ); - error_flag = 1; - break; - case ':': // Missing argument - fprintf( stderr ,"%s::main missing argument for option '-%c'\n" ,argv[0] ,optopt ); - error_flag = 1; - break; - } - } - - if( optind > argc ){ - fprintf( stderr ,"%s::main optind(%d) > argc(%d), which indicates an option parsing bug\n" ,argv[0] ,optind ,argc ); - error_flag = 1; - } - - // Exit on error after processing all options - if( error_flag ){ - fprintf( stderr ,"%s::main usage: %s [-s ] [-l ] [arguments...]\n" ,argv[0] ,argv[0] ); - return EXIT_FAILURE; - } - - // Rebase argv to prepare for run - if(optind > 0){ - argv[optind - 1] = argv[0]; // Program name at the new base - argc -= (optind - 1); - argv += (optind - 1); - } - - // Open the log file - FILE *log_file = Server·open_log(log_path); - if( !log_file ){ - fprintf( stderr ,"%s::main unable to open log file '%s'\n" ,argv[0] ,log_path ); - return 
Server·EXIT_LOG_FILE_ERROR; - } - - // Log parsed options - fprintf( log_file ,"%s::main socket_path='%s'\n" ,argv[0] ,socket_path ); - fprintf( log_file ,"%s::main log_path='%s'\n" ,argv[0] ,log_path ); - fflush(log_file); - - // Prepare file descriptors for error reporting - int fds[] = { fileno(stderr), fileno(log_file), -1 }; - - // Call the core server function - int exit_code = Server·run(argc ,argv ,fds ,socket_path); - - // Report return condition - Server·return_condition_report(exit_code ,fds); - - // Clean up - fclose(log_file); - - return exit_code; -} diff --git a/developer/source/cc/Server.lib.c b/developer/source/cc/Server.lib.c deleted file mode 100644 index 16451a3..0000000 --- a/developer/source/cc/Server.lib.c +++ /dev/null @@ -1,219 +0,0 @@ -#ifndef IFACE -#define Server·IMPLEMENTATION -#define IFACE -#endif - -#ifndef Server·IFACE -#define Server·IFACE - - #include - #include - - // Exit codes - typedef enum { - Server·EXIT_SUCCESS = 0, - Server·EXIT_LOG_FILE_ERROR, - Server·EXIT_SOCKET_CREATION_ERROR, - Server·EXIT_BIND_ERROR, - Server·EXIT_LISTEN_ERROR, - Server·EXIT_ACCEPT_ERROR - } Server·ExitCode; - - // Interface prototypes - int Server·run( int argc ,char **argv ,int *fds ,char *socket_path ); - void Server·return_condition_report( Server·ExitCode code ,int *fds ); - void Server·report( int *fds ,const char *message ); - FILE* Server·open_log( const char *log_path ); - -#endif // Server·IFACE - -#ifdef Server·IMPLEMENTATION - - // Implementation-specific includes - #include - #include - #include - #include - #include // Ensure full definition of struct ucred - #include - #include - #include - #include - - // Constants - #define Server·BUFFER_SIZE 256 - #define MAX_ARGC 16 - - // Internal function prototypes - static void parse( int *fds ,struct ucred *client_cred ,char *input_line ); - static void hello( int *fds ,int argc ,char *argv[] ,struct ucred *client_cred ); - - // Log a message with time and to multiple destinations - void 
Server·report( int *fds ,const char *message ){ - time_t now = time(NULL); - char time_buffer[32]; - strftime(time_buffer ,sizeof(time_buffer) ,"%Y-%m-%dT%H:%M:%SZ" ,gmtime(&now)); - - for( int i = 0; fds[i] != -1; ++i ){ - dprintf( fds[i] ,"\n%s:: %s" ,time_buffer ,message ); - } - } - - int Server·run( int argc ,char **argv ,int *fds ,char *socket_path ){ - (void)argc; // Suppress unused variable warnings - (void)argv; - - int server_fd ,client_fd; - struct sockaddr_un address; - - // Create socket - if( (server_fd = socket(AF_UNIX ,SOCK_STREAM ,0)) == -1 ){ - Server·report(fds ,"Socket creation failed."); - return Server·EXIT_SOCKET_CREATION_ERROR; - } - - // Configure socket address - memset(&address ,0 ,sizeof(address)); - address.sun_family = AF_UNIX; - strncpy(address.sun_path ,socket_path ,sizeof(address.sun_path) - 1); - - unlink(socket_path); - if( bind(server_fd ,(struct sockaddr *)&address ,sizeof(address)) == -1 ){ - Server·report(fds ,"Binding socket failed."); - close(server_fd); - return Server·EXIT_BIND_ERROR; - } - - if( listen(server_fd ,5) == -1 ){ - Server·report(fds ,"Listening on socket failed."); - close(server_fd); - return Server·EXIT_LISTEN_ERROR; - } - - char startup_message[Server·BUFFER_SIZE]; - snprintf(startup_message ,Server·BUFFER_SIZE ,"Server running with socket '%s' ,awaiting connections..." 
,socket_path); - Server·report(fds ,startup_message); - - while( (client_fd = accept(server_fd ,NULL ,NULL)) != -1 ){ - struct ucred client_cred; - socklen_t len = sizeof(client_cred); - - if( getsockopt(client_fd ,SOL_SOCKET ,SO_PEERCRED ,&client_cred ,&len) == -1 ){ - Server·report(fds ,"Failed to retrieve client credentials."); - close(client_fd); - continue; - } - - char connection_message[Server·BUFFER_SIZE]; - snprintf(connection_message ,Server·BUFFER_SIZE , - "Connection from PID=%d ,UID=%d ,GID=%d" , - client_cred.pid ,client_cred.uid ,client_cred.gid); - Server·report(fds ,connection_message); - - char buffer[Server·BUFFER_SIZE]; - memset(buffer ,0 ,Server·BUFFER_SIZE); - ssize_t bytes_read = read(client_fd ,buffer ,Server·BUFFER_SIZE - 1); - if(bytes_read > 0){ - char *line = strtok(buffer ,"\n"); - while(line != NULL){ - parse(fds ,&client_cred ,line); - line = strtok(NULL ,"\n"); - } - } else if(bytes_read == -1){ - Server·report(fds ,"Error reading from client."); - } - - close(client_fd); - } - - Server·report(fds ,"Error accepting connection."); - close(server_fd); - unlink(socket_path); - return Server·EXIT_ACCEPT_ERROR; - } - - // Parse a single input line and dispatch to the appropriate command - static void parse( int *fds ,struct ucred *client_cred ,char *input_line ){ - char *argv[MAX_ARGC + 1] = {0}; - int argc = 0; - - char *line_copy = strdup(input_line); - if(!line_copy){ - Server·report(fds ,"Failed to duplicate input line."); - return; - } - - char *token = strtok(line_copy ," "); - while(token != NULL && argc < MAX_ARGC){ - argv[argc++] = token; - token = strtok(NULL ," "); - } - - if(argc > 0){ - if( strcmp(argv[0] ,"hello") == 0 ){ - hello(fds ,argc ,argv ,client_cred); - }else{ - char unknown_command_message[Server·BUFFER_SIZE]; - snprintf(unknown_command_message ,Server·BUFFER_SIZE ,"Unknown command '%s'" ,argv[0]); - Server·report(fds ,unknown_command_message); - } - } - - free(line_copy); - } - - // Example command: hello - static 
void hello( int *fds ,int argc ,char *argv[] ,struct ucred *client_cred ){ - char hello_message[Server·BUFFER_SIZE]; - snprintf(hello_message ,Server·BUFFER_SIZE , - "hello:: invoked by PID=%d ,UID=%d ,GID=%d" , - client_cred->pid ,client_cred->uid ,client_cred->gid); - Server·report(fds ,hello_message); - - for( int i = 1; i < argc; ++i ){ - char argument_message[Server·BUFFER_SIZE]; - snprintf(argument_message ,Server·BUFFER_SIZE ," Arg %d: %s" ,i ,argv[i]); - Server·report(fds ,argument_message); - } - } - - // Error reporting function - void Server·return_condition_report( Server·ExitCode code ,int *fds ){ - const char *message; - switch( code ){ - case Server·EXIT_SUCCESS: - message = "Operation completed successfully."; - break; - case Server·EXIT_LOG_FILE_ERROR: - message = "Failed to open log file."; - break; - case Server·EXIT_SOCKET_CREATION_ERROR: - message = "Socket creation failed."; - break; - case Server·EXIT_BIND_ERROR: - message = "Binding socket failed."; - break; - case Server·EXIT_LISTEN_ERROR: - message = "Listening on socket failed."; - break; - case Server·EXIT_ACCEPT_ERROR: - message = "Error accepting connection."; - break; - default: - message = "Unknown error occurred."; - break; - } - - Server·report(fds ,message); - } - - // Log file opener - FILE* Server·open_log( const char *log_path ){ - FILE *log_file = fopen(log_path ,"a+"); - if( log_file ){ - Server·report( (int[]){fileno(log_file), -1} ,"Log file opened."); - } - return log_file; - } - -#endif // Server·IMPLEMENTATION diff --git a/developer/source/cc/db_add_user.cli.c b/developer/source/cc/db_add_user.cli.c deleted file mode 100644 index 9ae9874..0000000 --- a/developer/source/cc/db_add_user.cli.c +++ /dev/null @@ -1,36 +0,0 @@ -#define IFACE -#include -#include -#include -#include "DbSubu.lib.c" - -int main(int argc ,char *argv[]){ - if( argc < 7 ){ - fprintf(stderr, "Usage: %s \n", argv[0]); - return 1; - } - - const char *db_path = argv[1]; - const char *name = argv[2]; - 
const char *home_directory = argv[3]; - int shell_id = atoi(argv[4]); - int parent_id = atoi(argv[5]); - int user_type_id = atoi(argv[6]); - - DbSubu *db = DbSubu·open(db_path); - if( !db ){ - fprintf(stderr, "Failed to open database: %s\n", db_path); - return 1; - } - - int result = DbSubu·add_user(db, name, home_directory, shell_id, parent_id, user_type_id); - DbSubu·close(db); - - if( result == 0 ){ - printf("User added successfully.\n"); - return 0; - } else { - fprintf(stderr, "Failed to add user.\n"); - return 1; - } -} diff --git a/developer/source/cc/db_delete_user.cli.c b/developer/source/cc/db_delete_user.cli.c deleted file mode 100644 index cf2e621..0000000 --- a/developer/source/cc/db_delete_user.cli.c +++ /dev/null @@ -1,32 +0,0 @@ -#define IFACE -#include -#include -#include -#include "DbSubu.lib.c" - -int main(int argc ,char *argv[]){ - if( argc < 3 ){ - fprintf(stderr, "Usage: %s \n", argv[0]); - return 1; - } - - const char *db_path = argv[1]; - int user_id = atoi(argv[2]); - - DbSubu *db = DbSubu·open(db_path); - if( !db ){ - fprintf(stderr, "Failed to open database: %s\n", db_path); - return 1; - } - - int result = DbSubu·delete_user(db, user_id); - DbSubu·close(db); - - if( result == 0 ){ - printf("User deleted successfully.\n"); - return 0; - } else { - fprintf(stderr, "Failed to delete user.\n"); - return 1; - } -} diff --git a/developer/source/cc/db_log_event.cli.c b/developer/source/cc/db_log_event.cli.c deleted file mode 100644 index cf2e621..0000000 --- a/developer/source/cc/db_log_event.cli.c +++ /dev/null @@ -1,32 +0,0 @@ -#define IFACE -#include -#include -#include -#include "DbSubu.lib.c" - -int main(int argc ,char *argv[]){ - if( argc < 3 ){ - fprintf(stderr, "Usage: %s \n", argv[0]); - return 1; - } - - const char *db_path = argv[1]; - int user_id = atoi(argv[2]); - - DbSubu *db = DbSubu·open(db_path); - if( !db ){ - fprintf(stderr, "Failed to open database: %s\n", db_path); - return 1; - } - - int result = DbSubu·delete_user(db, 
user_id); - DbSubu·close(db); - - if( result == 0 ){ - printf("User deleted successfully.\n"); - return 0; - } else { - fprintf(stderr, "Failed to delete user.\n"); - return 1; - } -} diff --git a/developer/source/cc/db_open.cli.c b/developer/source/cc/db_open.cli.c deleted file mode 100644 index f64ba5d..0000000 --- a/developer/source/cc/db_open.cli.c +++ /dev/null @@ -1,34 +0,0 @@ -#define IFACE -#include -#include -#include -#include "Db.lib.c" - -// Define default database path -#define DEFAULT_DB_PATH "db.sqlite" - -int main(int argc ,char *argv[]){ - const char *db_path = (argc > 1) ? argv[1] : DEFAULT_DB_PATH; - - // Open the database using Db·open - sqlite3 *db = Db·open(db_path ,true); - if( !db ){ - fprintf(stderr ,"Failed to open or create database: %s\n" ,db_path); - return EXIT_FAILURE; - } - - // Check if the file was created or already existed - printf("Database %s opened successfully\n" ,db_path); - - // Attempt to close the database - if( db ){ - Db·close(db); - printf("Database handle %p closed successfully.\n" ,db); - return EXIT_SUCCESS; - } else { - fprintf(stderr ,"Invalid or NULL database handle: %p\n" ,db); - return EXIT_FAILURE; - } - - return EXIT_SUCCESS; -} diff --git a/developer/source/cc/db_validate_schema.cli.c b/developer/source/cc/db_validate_schema.cli.c deleted file mode 100644 index 88d20e1..0000000 --- a/developer/source/cc/db_validate_schema.cli.c +++ /dev/null @@ -1,25 +0,0 @@ -#define IFACE -#include -#include -#include -#include "DbSubu.lib.c" - -int main(int argc ,char *argv[]){ - const char *db_path = (argc > 1) ? 
argv[1] : "db.sqlite"; - DbSubu *db = DbSubu·open(db_path); - if( !db ){ - fprintf(stderr, "Failed to open database: %s\n", db_path); - return 1; - } - - int result = DbSubu·validate_schema(db); - DbSubu·close(db); - - if( result == 0 ){ - printf("Schema validation passed.\n"); - return 0; - } else { - fprintf(stderr, "Schema validation failed.\n"); - return 1; - } -} diff --git a/developer/source/cc/scratchpad/.gitignore b/developer/source/cc/scratchpad/.gitignore deleted file mode 100644 index 120f485..0000000 --- a/developer/source/cc/scratchpad/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!/.gitignore diff --git a/developer/source/cc/sqlite/schema.sql b/developer/source/cc/sqlite/schema.sql deleted file mode 100644 index faf7053..0000000 --- a/developer/source/cc/sqlite/schema.sql +++ /dev/null @@ -1,79 +0,0 @@ --- Schema for the subu server --- - --- List Tables --- SQLite does not support PSQL style types --- --- CREATE TYPE List AS ( --- id SERIAL, -- Integer ID --- name TEXT NOT NULL -- Name of the list entry --- ); --- --- so though these all have the same `List` form, they are declared independently --- - CREATE TABLE db_property_list ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT NOT NULL UNIQUE - ); - - CREATE TABLE db_event_list ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT NOT NULL UNIQUE - ); - - CREATE TABLE shell_list ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT NOT NULL UNIQUE - ); - - CREATE TABLE system_resource_list ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT NOT NULL UNIQUE - ); - - CREATE TABLE user_type_list ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - name TEXT NOT NULL UNIQUE - ); - --- Data Tables --- - CREATE TABLE db_property ( - id INTEGER PRIMARY KEY, - property_id INTEGER NOT NULL REFERENCES db_property_list(id), - type TEXT NOT NULL, - value TEXT, - created_at DATETIME DEFAULT CURRENT_TIMESTAMP - ); - - CREATE TABLE db_event ( - id INTEGER PRIMARY KEY, - event_time DATETIME DEFAULT CURRENT_TIMESTAMP, 
- event_id INTEGER NOT NULL REFERENCES db_event_list(id), - user_id INTEGER REFERENCES user(id) - ); - - CREATE TABLE user ( - id INTEGER PRIMARY KEY, - login_gid INTEGER NOT NULL UNIQUE, - name TEXT NOT NULL UNIQUE, - home_directory TEXT NOT NULL, - shell INTEGER NOT NULL REFERENCES shell_list(id), - parent_id INTEGER REFERENCES user(id), - user_type_id INTEGER NOT NULL REFERENCES user_type_list(id), - status TEXT DEFAULT 'active' - ); - - CREATE TABLE share ( - id INTEGER PRIMARY KEY, - user_id INTEGER NOT NULL REFERENCES user(id), - other_user_id INTEGER NOT NULL REFERENCES user(id), - permissions TEXT NOT NULL - ); - - CREATE TABLE system_resource ( - id INTEGER PRIMARY KEY, - user_id INTEGER NOT NULL REFERENCES user(id), - resource_id INTEGER NOT NULL REFERENCES system_resource_list(id), - granted_by INTEGER REFERENCES user(id) - ); diff --git a/developer/source/deprecated/.gitignore b/developer/source/deprecated/.gitignore deleted file mode 100644 index 120f485..0000000 --- a/developer/source/deprecated/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!/.gitignore diff --git a/developer/source/deprecated/server.lib.c b/developer/source/deprecated/server.lib.c deleted file mode 100644 index baf5469..0000000 --- a/developer/source/deprecated/server.lib.c +++ /dev/null @@ -1,104 +0,0 @@ -#ifndef IFACE -#define Server·IMPLEMENTATION -#define IFACE -#endif - -#ifndef Server·IFACE -#define Server·IFACE - - // Necessary interface includes - #include - #include - #include - - // Interface prototypes - int Server·run(); - -#endif // Server·IFACE - -#ifdef Server·IMPLEMENTATION - - // Implementation-specific includes - #include - #include - #include - #include - #include - - // Constants - #define Server·SOCKET_PATH "/var/user_data/Thomas-developer/subu/developer/mockup/subu_server_home/subu_server.sock" - #define Server·LOG_PATH "server.log" - #define Server·BUFFER_SIZE 256 - - int Server·run(){ - int server_fd ,client_fd; - struct sockaddr_un address; - char 
buffer[Server·BUFFER_SIZE]; - FILE *log_file; - - // Open the log file - log_file = fopen(Server·LOG_PATH ,"a+"); - if( log_file == NULL ){ - perror("Server·run:: error opening log file"); - return EXIT_FAILURE; - } - - // Create the socket - if( (server_fd = socket(AF_UNIX ,SOCK_STREAM ,0)) == -1 ){ - perror("Server·run:: error creating socket"); - fclose(log_file); - return EXIT_FAILURE; - } - - // Configure socket address - memset(&address ,0 ,sizeof(address)); - address.sun_family = AF_UNIX; - strncpy(address.sun_path ,Server·SOCKET_PATH ,sizeof(address.sun_path) - 1); - - // Bind the socket - unlink(Server·SOCKET_PATH); // Remove existing file if present - if( bind(server_fd ,(struct sockaddr *)&address ,sizeof(address)) == -1 ){ - perror("Server·run:: error binding socket"); - fclose(log_file); - close(server_fd); - return EXIT_FAILURE; - } - - // Listen for connections - if( listen(server_fd ,5) == -1 ){ - perror("Server·run:: error listening on socket"); - fclose(log_file); - close(server_fd); - return EXIT_FAILURE; - } - - printf("Server·run:: server running, waiting for connections...\n"); - - // Accept and handle client connections - while( (client_fd = accept(server_fd ,NULL ,NULL)) != -1 ){ - ssize_t bytes_read; - printf("Server·run:: connection made!\n"); - - memset(buffer ,0 ,Server·BUFFER_SIZE); - bytes_read = read(client_fd ,buffer ,Server·BUFFER_SIZE - 1); - if( bytes_read > 0 ){ - printf("Server·run:: connection said: %s\n" ,buffer); - fprintf(log_file ,"Received: %s\n" ,buffer); - fflush(log_file); - } else if( bytes_read == -1 ){ - perror("Server·run:: error reading from client"); - } - - close(client_fd); - } - - // Clean up - perror("Server·run:: error accepting connection"); - fclose(log_file); - close(server_fd); - unlink(Server·SOCKET_PATH); - - return EXIT_FAILURE; - } - -#endif // Server·IMPLEMENTATION diff --git a/developer/source/device_management/bestow_audio.sh b/developer/source/device_management/bestow_audio.sh deleted file mode 
100755 index 20545e8..0000000 --- a/developer/source/device_management/bestow_audio.sh +++ /dev/null @@ -1,76 +0,0 @@ -#!/bin/bash -# give_audio.sh — run as master user "Thomas" -# Usage: ./give_audio.sh -# Example: ./give_audio.sh Thomas-US # give card to subuser -# ./give_audio.sh Thomas # reclaim for master - -set -euo pipefail - -target="${1-}" -if [[ -z "$target" ]]; then - echo "❌ usage: $0 "; exit 2 -fi - -master="Thomas" - -# don't use sudo -v as it dumps the password into the emacs shell -sudo echo >& /dev/null - -run() { echo "+ $*"; eval "$*"; } - -# --- sanity checks --- -if ! id "$target" &>/dev/null; then - echo "❌ user not found: $target"; exit 1 -fi -if [[ "$(id -un)" != "$master" ]]; then - echo "❌ must be run as master user '$master'"; exit 1 -fi - -# Gather all subusers (Thomas-*) -mapfile -t subusers < <(getent passwd | awk -F: '$1 ~ /^'"$master"'-/ {print $1}' | sort) - -stop_master_audio() { - run "systemctl --user stop pipewire pipewire-pulse wireplumber || true" -} - -start_master_audio() { - # start services (not only sockets) to avoid lazy-activation races - run "systemctl --user start pipewire.service pipewire-pulse.service wireplumber.service" -} - -stop_subu_audio() { - local u="$1" - run "sudo machinectl shell ${u}@ /bin/bash -lc 'systemctl --user stop pipewire pipewire-pulse wireplumber || true'" -} - -start_subu_audio() { - local u="$1" - # Keep subuser from trying to bind to logind (not the active seat) - run "sudo machinectl shell ${u}@ /bin/bash -lc 'export WIREPLUMBER_DISABLE_PLUGINS=logind; systemctl --user import-environment WIREPLUMBER_DISABLE_PLUGINS; systemctl --user start pipewire.service pipewire-pulse.service wireplumber.service'" -} - -# --- stop everyone first (to release ALSA cleanly) --- -stop_master_audio -for u in "${subusers[@]}"; do - stop_subu_audio "$u" -done - -# Small settle time so ALSA reservation clears -sleep 0.5 - -# --- start only the target --- -if [[ "$target" == "$master" ]]; then - start_master_audio 
-else - # ensure linger for target so user services can run - run "sudo loginctl enable-linger '$target' || true" - start_subu_audio "$target" -fi - -# --- quick verification (best-effort) --- -if [[ "$target" == "$master" ]]; then - # Show default sink name (may require pipewire-pulse to be fully up) - run "pactl info | sed -n 's/^Default Sink: /Default Sink: /p'" -else - run "sudo machinectl shell ${target}@ /bin/bash -lc 'pactl info | sed -n \\\"s/^Default Sink: /Default Sink: /p\\\"'" -fi diff --git a/developer/source/login/login_to_subu.sh b/developer/source/login/login_to_subu.sh deleted file mode 100755 index ca28743..0000000 --- a/developer/source/login/login_to_subu.sh +++ /dev/null @@ -1,135 +0,0 @@ -#!/bin/bash -# launch_subu.sh — Start a subuser shell (console or GUI-aware, with systemd user session) - -set -euo pipefail -umask 0077 - -subu="$1" -if [ -z "$subu" ]; then - echo "❌ No subuser name supplied" - exit 1 -fi - -subu_user="Thomas-$subu" -if ! id "$subu_user" &>/dev/null; then - echo "❌ User $subu_user does not exist" - exit 1 -fi - -# Check required commands -error_flag=0 -for cmd in machinectl xauth xhost dbus-run-session; do - if ! command -v "$cmd" &>/dev/null; then - echo "❌ $cmd not found" - error_flag=1 - fi -done -if [ "$error_flag" -eq 1 ]; then - exit 1 -fi - -# don't use sudo -v, because it will echo the password into the emacs shell -sudo echo >& /dev/null - - -# Something broke when I turned this off. What was it. Will have to turn it off again and -# test. -# -# Enable lingering so user services can persist -sudo loginctl enable-linger "$subu_user" - -# Decide how to set the use_xauth and use_xhost flags. -# -# As of the time of this writing, on my machines, Wayland insists on -# xauth, while my X11 is refuses to use it, thus it needs xhost control. -# So this is how I determine how to set the flags here. 
-# - -# bash will evaluate this variables inside a quoted if even when the -# gate is falase, so everything needs to be initialized, whether used -# or not. -subu_Xauthority_path="" -use_xauth=0 -use_xhost=0 -if [[ -n "${WAYLAND_DISPLAY:-}" ]]; then - has_display=true - XDG_SESSION_TYPE="wayland" - subu_Xauthority_path="$HOME/subu/$subu/.Xauthority" - use_xauth=1 - use_xhost=0 - echo "🌀 Wayland session - Using xauth for access control" - -elif [[ -n "${DISPLAY:-}" ]]; then - has_display=true - XDG_SESSION_TYPE="x11" - use_xauth=0 - use_xhost=1 - echo "🧱 X11 session - Using xhost for access control" - -else - has_display=false - XDG_SESSION_TYPE="tty" - use_xauth=0 - use_xhost=0 - echo "🖳 Console session (no X detected)" -fi - -if [[ "$use_xhost" -eq 1 ]]; then - xhost +SI:localuser:"$subu_user" -fi -if [[ "$use_xauth" -eq 1 ]]; then - mkdir -p "$(dirname "$subu_Xauthority_path")" - touch "$subu_Xauthority_path" - xauth extract "$subu_Xauthority_path" "$DISPLAY" -fi - -if $has_display; then - - - sudo machinectl shell "$subu_user"@ /bin/bash -c " - - # --- session env from parent --- - export DISPLAY=\"${DISPLAY:-${WAYLAND_DISPLAY}}\"; - export XDG_RUNTIME_DIR='/run/user/$(id -u "$subu_user")'; - export XDG_SESSION_TYPE=\"$XDG_SESSION_TYPE\"; - export XDG_SESSION_CLASS=\"user\"; - export XDG_DATA_DIRS=\"/usr/share/gnome:/usr/local/share/:/usr/share/\"; - export USE_XAUTH=$use_xauth - - # Only set XAUTHORITY when we actually prepared it (Wayland/xauth case) - if [[ \"\$USE_XAUTH\" -eq 1 ]]; then - export XAUTHORITY=\"$subu_Xauthority_path\" - fi - - if command -v /usr/bin/gnome-keyring-daemon &>/dev/null; then - eval \$(/usr/bin/gnome-keyring-daemon --start) - export GNOME_KEYRING_CONTROL GNOME_KEYRING_PID - fi - - # WirePlumber: ignore logind (subuser isn't the active seat) - systemctl --user set-environment WIREPLUMBER_DISABLE_PLUGINS=logind - systemctl --user import-environment DISPLAY XAUTHORITY WAYLAND_DISPLAY XDG_RUNTIME_DIR XDG_SESSION_TYPE - - # Bring up 
audio (sockets first, then services) - systemctl --user enable --now pipewire.socket pipewire-pulse.socket >/dev/null 2>&1 || true - systemctl --user restart wireplumber pipewire pipewire-pulse - - exec dbus-run-session -- bash -l - " - -else - - # Console mode with DBus session (give it audio too) - sudo machinectl shell "$subu_user"@ /bin/bash -c " - export XDG_RUNTIME_DIR='/run/user/$(id -u "$subu_user")}'; - - systemctl --user set-environment WIREPLUMBER_DISABLE_PLUGINS=logind - systemctl --user import-environment XDG_RUNTIME_DIR - systemctl --user enable --now pipewire.socket pipewire-pulse.socket >/dev/null 2>&1 || true - systemctl --user restart wireplumber pipewire pipewire-pulse - - exec dbus-run-session -- bash -l - " -fi - - diff --git a/developer/source/login/logout_subu.sh b/developer/source/login/logout_subu.sh deleted file mode 100644 index a4a4206..0000000 --- a/developer/source/login/logout_subu.sh +++ /dev/null @@ -1,3 +0,0 @@ -# As root: -# loginctl terminate-user Thomas-US -loginctl terminate-user $1 diff --git a/developer/source/manager.tgz b/developer/source/manager.tgz deleted file mode 100644 index d585797..0000000 Binary files a/developer/source/manager.tgz and /dev/null differ diff --git a/developer/source/manager/CLI.py b/developer/source/manager/CLI.py deleted file mode 100755 index a79691e..0000000 --- a/developer/source/manager/CLI.py +++ /dev/null @@ -1,146 +0,0 @@ -#!/usr/bin/env python3 -# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*- -""" -CLI.py — thin command-line harness -Version: 0.2.0 -""" -import sys, argparse -from text import USAGE, HELP, EXAMPLE, VERSION -import core - -def CLI(argv=None) -> int: - argv = argv or sys.argv[1:] - if not argv: - print(USAGE) - return 0 - - # simple verbs that bypass argparse (so `help/version/example` always work) - simple = {"help": HELP, "--help": HELP, "-h": HELP, "usage": USAGE, "example": EXAMPLE, "version": VERSION} - if argv[0] in simple: - out = 
simple[argv[0]] - print(out if isinstance(out, str) else out()) - return 0 - - p = argparse.ArgumentParser(prog="subu", add_help=False) - p.add_argument("-V", "--Version", action="store_true", help="print version") - sub = p.add_subparsers(dest="verb") - - # init - ap = sub.add_parser("init") - ap.add_argument("token", nargs="?") - - # create/list/info - ap = sub.add_parser("create") - ap.add_argument("owner") - ap.add_argument("name") - - sub.add_parser("list") - ap = sub.add_parser("info"); ap.add_argument("subu_id") - ap = sub.add_parser("information"); ap.add_argument("subu_id") - - # lo - ap = sub.add_parser("lo") - ap.add_argument("state", choices=["up","down"]) - ap.add_argument("subu_id") - - # WG - ap = sub.add_parser("WG") - ap.add_argument("verb", choices=["global","create","server_provided_public_key","info","information","up","down"]) - ap.add_argument("arg1", nargs="?") - ap.add_argument("arg2", nargs="?") - - # attach/detach - ap = sub.add_parser("attach") - ap.add_argument("what", choices=["WG"]) - ap.add_argument("subu_id") - ap.add_argument("wg_id") - - ap = sub.add_parser("detach") - ap.add_argument("what", choices=["WG"]) - ap.add_argument("subu_id") - - # network - ap = sub.add_parser("network") - ap.add_argument("state", choices=["up","down"]) - ap.add_argument("subu_id") - - # option - ap = sub.add_parser("option") - ap.add_argument("verb", choices=["set","get","list"]) - ap.add_argument("subu_id") - ap.add_argument("name", nargs="?") - ap.add_argument("value", nargs="?") - - # exec - ap = sub.add_parser("exec") - ap.add_argument("subu_id") - ap.add_argument("--", dest="cmd", nargs=argparse.REMAINDER, default=[]) - - ns = p.parse_args(argv) - if ns.Version: - print(VERSION); return 0 - - try: - if ns.verb == "init": - return core.cmd_init(ns.token) - - if ns.verb == "create": - core.create_subu(ns.owner, ns.name); return 0 - if ns.verb == "list": - core.list_subu(); return 0 - if ns.verb in ("info","information"): - 
core.info_subu(ns.subu_id); return 0 - - if ns.verb == "lo": - core.lo_toggle(ns.subu_id, ns.state); return 0 - - if ns.verb == "WG": - v = ns.verb - if ns.arg1 is None and v in ("info","information"): - print("WG info requires WG_ID"); return 2 - if v == "global": - core.wg_global(ns.arg1); return 0 - if v == "create": - wid = core.wg_create(ns.arg1); print(wid); return 0 - if v == "server_provided_public_key": - core.wg_set_pubkey(ns.arg1, ns.arg2); return 0 - if v in ("info","information"): - core.wg_info(ns.arg1); return 0 - if v == "up": - core.wg_up(ns.arg1); return 0 - if v == "down": - core.wg_down(ns.arg1); return 0 - - if ns.verb == "attach": - if ns.what == "WG": - core.attach_wg(ns.subu_id, ns.wg_id); return 0 - - if ns.verb == "detach": - if ns.what == "WG": - core.detach_wg(ns.subu_id); return 0 - - if ns.verb == "network": - core.network_toggle(ns.subu_id, ns.state); return 0 - - if ns.verb == "option": - if ns.verb == "option" and ns.name is None and ns.value is None and ns.verb == "list": - core.option_list(ns.subu_id); return 0 - if ns.verb == "set": - core.option_set(ns.subu_id, ns.name, ns.value); return 0 - if ns.verb == "get": - core.option_get(ns.subu_id, ns.name); return 0 - if ns.verb == "list": - core.option_list(ns.subu_id); return 0 - - if ns.verb == "exec": - if not ns.cmd: - print("subu exec -- ..."); return 2 - core.exec_in_subu(ns.subu_id, ns.cmd); return 0 - - print(USAGE); return 2 - except Exception as e: - print(f"error: {e}") - return 1 - -if __name__ == "__main__": - sys.exit(CLI()) diff --git a/developer/source/manager/bpf_force_egress.c b/developer/source/manager/bpf_force_egress.c deleted file mode 100644 index c3aedec..0000000 --- a/developer/source/manager/bpf_force_egress.c +++ /dev/null @@ -1,43 +0,0 @@ -// -*- mode: c; c-basic-offset: 2; indent-tabs-mode: nil; tab-width: 8 -*- -// bpf_force_egress.c — MVP scaffold to validate UID and prep metadata -// Version 0.2.0 -#include -#include -#include - -char LICENSE[] 
SEC("license") = "GPL"; - -struct { - __uint(type, BPF_MAP_TYPE_HASH); - __type(key, __u32); // tgid - __type(value, __u32); // reserved (target ifindex placeholder) - __uint(max_entries, 1024); -} subu_tgid2if SEC(".maps"); - -// Helper: return 0 = allow, <0 reject -static __always_inline int allow_uid(struct bpf_sock_addr *ctx) { - // MVP: just accept everyone; you can gate on UID 2017 with bpf_get_current_uid_gid() - // __u32 uid = (__u32)(bpf_get_current_uid_gid() & 0xffffffff); - // if (uid != 2017) return -1; - return 0; -} - -// Hook: cgroup/connect4 — runs before connect(2) proceeds -SEC("cgroup/connect4") -int subu_connect4(struct bpf_sock_addr *ctx) -{ - if (allow_uid(ctx) < 0) return -1; - // Future: read pinned map/meta, set SO_* via bpf_setsockopt when permitted - return 0; -} - -// Hook: cgroup/post_bind4 — runs after a local bind is chosen -SEC("cgroup/post_bind4") -int subu_post_bind4(struct bpf_sock *sk) -{ - // Future: enforce bound dev if kernel helper allows; record tgid->ifindex - __u32 tgid = bpf_get_current_pid_tgid() >> 32; - __u32 val = 0; - bpf_map_update_elem(&subu_tgid2if, &tgid, &val, BPF_ANY); - return 0; -} diff --git a/developer/source/manager/core.py b/developer/source/manager/core.py deleted file mode 100644 index c363ec2..0000000 --- a/developer/source/manager/core.py +++ /dev/null @@ -1,254 +0,0 @@ -# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*- -""" -core.py — worker API for subu manager -Version: 0.2.0 -""" -import os, sqlite3, subprocess -from pathlib import Path -from contextlib import closing -from text import VERSION -from worker_bpf import ensure_mounts, install_steering, remove_steering, BpfError - -DB_FILE = Path("./subu.db") -WG_GLOBAL_FILE = Path("./WG_GLOBAL") - -def run(cmd, check=True): - r = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - if check and r.returncode != 0: - raise RuntimeError(f"cmd failed: {' '.join(cmd)}\n{r.stderr}") - return 
r.stdout.strip() - -# ---------------- DB ---------------- -def _db(): - if not DB_FILE.exists(): - raise FileNotFoundError("subu.db not found; run `subu init ` first") - return sqlite3.connect(DB_FILE) - -def cmd_init(token: str|None): - if DB_FILE.exists(): - raise FileExistsError("db already exists") - if not token or len(token) < 6: - raise ValueError("init requires a 6+ char token") - with closing(sqlite3.connect(DB_FILE)) as db: - c = db.cursor() - c.executescript(""" - CREATE TABLE subu ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - owner TEXT, - name TEXT, - netns TEXT, - lo_state TEXT DEFAULT 'down', - wg_id INTEGER, - network_state TEXT DEFAULT 'down' - ); - CREATE TABLE wg ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - endpoint TEXT, - local_ip TEXT, - allowed_ips TEXT, - pubkey TEXT, - state TEXT DEFAULT 'down' - ); - CREATE TABLE options ( - subu_id INTEGER, - name TEXT, - value TEXT, - PRIMARY KEY (subu_id, name) - ); - """) - db.commit() - print(f"created subu.db (v{VERSION})") - -# ------------- Subu ops ------------- -def create_subu(owner: str, name: str) -> str: - with closing(_db()) as db: - c = db.cursor() - subu_netns = f"ns-subu_tmp" # temp; we rename after ID known - c.execute("INSERT INTO subu (owner, name, netns) VALUES (?, ?, ?)", - (owner, name, subu_netns)) - sid = c.lastrowid - netns = f"ns-subu_{sid}" - c.execute("UPDATE subu SET netns=? 
WHERE id=?", (netns, sid)) - db.commit() - - # create netns - run(["ip", "netns", "add", netns]) - run(["ip", "-n", netns, "link", "set", "lo", "down"]) - print(f"Created subu_{sid} ({owner}:{name}) with netns {netns}") - return f"subu_{sid}" - -def list_subu(): - with closing(_db()) as db: - for row in db.execute("SELECT id, owner, name, netns, lo_state, wg_id, network_state FROM subu"): - print(row) - -def info_subu(subu_id: str): - sid = int(subu_id.split("_")[1]) - with closing(_db()) as db: - row = db.execute("SELECT * FROM subu WHERE id=?", (sid,)).fetchone() - if not row: - print("not found"); return - print(row) - wg = db.execute("SELECT wg_id FROM subu WHERE id=?", (sid,)).fetchone()[0] - if wg is not None: - wrow = db.execute("SELECT * FROM wg WHERE id=?", (wg,)).fetchone() - print("WG:", wrow) - opts = db.execute("SELECT name,value FROM options WHERE subu_id=?", (sid,)).fetchall() - print("Options:", opts) - -def lo_toggle(subu_id: str, state: str): - sid = int(subu_id.split("_")[1]) - with closing(_db()) as db: - ns = db.execute("SELECT netns FROM subu WHERE id=?", (sid,)).fetchone() - if not ns: raise ValueError("subu not found") - ns = ns[0] - run(["ip", "netns", "exec", ns, "ip", "link", "set", "lo", state]) - db.execute("UPDATE subu SET lo_state=? 
WHERE id=?", (state, sid)) - db.commit() - print(f"{subu_id}: lo {state}") - -# ------------- WG ops --------------- -def wg_global(basecidr: str): - WG_GLOBAL_FILE.write_text(basecidr.strip()+"\n") - print(f"WG pool base = {basecidr}") - -def _alloc_ip(idx: int, base: str) -> str: - # simplistic /24 allocator: base must be x.y.z.0/24 - prefix = base.split("/")[0].rsplit(".", 1)[0] - host = 2 + idx - return f"{prefix}.{host}/32" - -def wg_create(endpoint: str) -> str: - if not WG_GLOBAL_FILE.exists(): - raise RuntimeError("set WG base with `subu WG global ` first") - base = WG_GLOBAL_FILE.read_text().strip() - with closing(_db()) as db: - c = db.cursor() - idx = c.execute("SELECT COUNT(*) FROM wg").fetchone()[0] - local_ip = _alloc_ip(idx, base) - c.execute("INSERT INTO wg (endpoint, local_ip, allowed_ips) VALUES (?, ?, ?)", - (endpoint, local_ip, "0.0.0.0/0")) - wid = c.lastrowid - db.commit() - print(f"WG_{wid} endpoint={endpoint} ip={local_ip}") - return f"WG_{wid}" - -def wg_set_pubkey(wg_id: str, key: str): - wid = int(wg_id.split("_")[1]) - with closing(_db()) as db: - db.execute("UPDATE wg SET pubkey=? WHERE id=?", (key, wid)) - db.commit() - print("ok") - -def wg_info(wg_id: str): - wid = int(wg_id.split("_")[1]) - with closing(_db()) as db: - row = db.execute("SELECT * FROM wg WHERE id=?", (wid,)).fetchone() - print(row if row else "not found") - -def wg_up(wg_id: str): - wid = int(wg_id.split("_")[1]) - # Admin-up of WG device handled via network_toggle once attached. 
- print(f"{wg_id}: up (noop until attached)") - -def wg_down(wg_id: str): - wid = int(wg_id.split("_")[1]) - print(f"{wg_id}: down (noop until attached)") - -# ---------- attach/detach + BPF ---------- -def attach_wg(subu_id: str, wg_id: str): - ensure_mounts() - sid = int(subu_id.split("_")[1]); wid = int(wg_id.split("_")[1]) - with closing(_db()) as db: - r = db.execute("SELECT netns FROM subu WHERE id=?", (sid,)).fetchone() - if not r: raise ValueError("subu not found") - ns = r[0] - w = db.execute("SELECT endpoint, local_ip, pubkey FROM wg WHERE id=?", (wid,)).fetchone() - if not w: raise ValueError("WG not found") - endpoint, local_ip, pubkey = w - - ifname = f"subu_{wid}" - # create WG link in init ns, move to netns - run(["ip", "link", "add", ifname, "type", "wireguard"]) - run(["ip", "link", "set", ifname, "netns", ns]) - run(["ip", "-n", ns, "addr", "add", local_ip, "dev", ifname], check=False) - run(["ip", "-n", ns, "link", "set", "dev", ifname, "mtu", "1420"]) - run(["ip", "-n", ns, "link", "set", "dev", ifname, "down"]) # keep engine down until `network up` - - # install steering (MVP: create cgroup + attach bpf program) - try: - install_steering(subu_id, ns, ifname) - print(f"{subu_id}: eBPF steering installed -> {ifname}") - except BpfError as e: - print(f"{subu_id}: steering warning: {e}") - - with closing(_db()) as db: - db.execute("UPDATE subu SET wg_id=? 
WHERE id=?", (wid, sid)) - db.commit() - print(f"attached {wg_id} to {subu_id} in {ns} as {ifname}") - -def detach_wg(subu_id: str): - ensure_mounts() - sid = int(subu_id.split("_")[1]) - with closing(_db()) as db: - r = db.execute("SELECT netns,wg_id FROM subu WHERE id=?", (sid,)).fetchone() - if not r: print("not found"); return - ns, wid = r - if wid is None: - print("nothing attached"); return - ifname = f"subu_{wid}" - run(["ip", "-n", ns, "link", "del", ifname], check=False) - try: - remove_steering(subu_id) - except BpfError as e: - print(f"steering remove warn: {e}") - with closing(_db()) as db: - db.execute("UPDATE subu SET wg_id=NULL WHERE id=?", (sid,)) - db.commit() - print(f"detached WG_{wid} from {subu_id}") - -# ------------- network up/down ------------- -def network_toggle(subu_id: str, state: str): - sid = int(subu_id.split("_")[1]) - with closing(_db()) as db: - ns, wid = db.execute("SELECT netns,wg_id FROM subu WHERE id=?", (sid,)).fetchone() - # always make sure lo up on 'up' - if state == "up": - run(["ip", "netns", "exec", ns, "ip", "link", "set", "lo", "up"], check=False) - if wid is not None: - ifname = f"subu_{wid}" - run(["ip", "-n", ns, "link", "set", "dev", ifname, state], check=False) - with closing(_db()) as db: - db.execute("UPDATE subu SET network_state=? WHERE id=?", (state, sid)) - db.commit() - print(f"{subu_id}: network {state}") - -# ------------- options ---------------- -def option_set(subu_id: str, name: str, value: str): - sid = int(subu_id.split("_")[1]) - with closing(_db()) as db: - db.execute("INSERT INTO options (subu_id,name,value) VALUES(?,?,?) " - "ON CONFLICT(subu_id,name) DO UPDATE SET value=excluded.value", - (sid, name, value)) - db.commit() - print("ok") - -def option_get(subu_id: str, name: str): - sid = int(subu_id.split("_")[1]) - with closing(_db()) as db: - row = db.execute("SELECT value FROM options WHERE subu_id=? 
AND name=?", (sid,name)).fetchone() - print(row[0] if row else "") - -def option_list(subu_id: str): - sid = int(subu_id.split("_")[1]) - with closing(_db()) as db: - rows = db.execute("SELECT name,value FROM options WHERE subu_id=?", (sid,)).fetchall() - for n,v in rows: - print(f"{n}={v}") - -# ------------- exec ------------------- -def exec_in_subu(subu_id: str, cmd: list): - sid = int(subu_id.split("_")[1]) - with closing(_db()) as db: - ns = db.execute("SELECT netns FROM subu WHERE id=?", (sid,)).fetchone()[0] - os.execvp("ip", ["ip","netns","exec", ns] + cmd) diff --git a/developer/source/manager/subu b/developer/source/manager/subu deleted file mode 120000 index 45a8ec1..0000000 --- a/developer/source/manager/subu +++ /dev/null @@ -1 +0,0 @@ -CLI.py \ No newline at end of file diff --git a/developer/source/manager/temp.sh b/developer/source/manager/temp.sh deleted file mode 100644 index 36855b6..0000000 --- a/developer/source/manager/temp.sh +++ /dev/null @@ -1,40 +0,0 @@ -# from: /home/Thomas/subu_data/developer/project/active/subu/developer/source/manager - -set -euo pipefail - -echo "== 1) Backup legacy-prefixed modules ==" -mkdir -p _old_prefixed -for f in subu_*.py; do - [ -f "$f" ] && mv -v "$f" _old_prefixed/ -done -[ -f subu_worker_bpf.py ] && mv -v subu_worker_bpf.py _old_prefixed/ || true - -echo "== 2) Ensure only the new module names remain ==" -# Keep these (already present in your tar): -# CLI.py core.py text.py worker_bpf.py bpf_force_egress.c -ls -1 - -echo "== 3) Make CLI runnable as 'subu' ==" -# Make sure CLI has a shebang; add if missing -if ! 
head -n1 CLI.py | grep -q '^#!/usr/bin/env python3'; then - (printf '%s\n' '#!/usr/bin/env python3' ; cat CLI.py) > .CLI.tmp && mv .CLI.tmp CLI.py -fi -chmod +x CLI.py -ln -sf CLI.py subu -chmod +x subu - -echo "== 4) Quick import sanity ==" -# Fail if any of the remaining files still import the old module names -bad=$(grep -R --line-number -E 'import +subu_|from +subu_' -- *.py || true) -if [ -n "$bad" ]; then - echo "Found old-style imports; please fix:" >&2 - echo "$bad" >&2 - exit 1 -fi - -echo "== 5) Show version and help ==" -./subu version || true -./subu help || true -./subu || true # should print usage by default - -echo "== Done. If this looks good, you can delete _old_prefixed when ready. ==" diff --git a/developer/source/manager/test.sh b/developer/source/manager/test.sh deleted file mode 100644 index 706250b..0000000 --- a/developer/source/manager/test.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/env bash - -set -x -./CLI # -> USAGE (exit 0) -./CLI usage # -> USAGE -./CLI -h # -> HELP -./CLI --help # -> HELP -./CLI help # -> HELP -./CLI help WG # -> WG topic help (or full HELP if topic unknown) -./CLI example # -> EXAMPLE -./CLI version # -> 0.1.4 -./CLI -V # -> 0.1.4 - diff --git a/developer/source/manager/test_0.sh b/developer/source/manager/test_0.sh deleted file mode 100755 index ac354d3..0000000 --- a/developer/source/manager/test_0.sh +++ /dev/null @@ -1,11 +0,0 @@ -set -x -./subu.py # -> USAGE (exit 0) -./subu.py usage # -> USAGE -./subu.py -h # -> HELP -./subu.py --help # -> HELP -./subu.py help # -> HELP -./subu.py help WG # -> WG topic help (or full HELP if topic unknown) -./subu.py example # -> EXAMPLE -./subu.py version # -> 0.1.4 -./subu.py -V # -> 0.1.4 -set +x diff --git a/developer/source/manager/test_0_expected.sh b/developer/source/manager/test_0_expected.sh deleted file mode 100644 index 8e31ed5..0000000 --- a/developer/source/manager/test_0_expected.sh +++ /dev/null @@ -1,353 +0,0 @@ -++ ./subu.py -usage: subu [-V] [] - -Quick verbs: - 
usage Show this usage summary - help [topic] Detailed help; same as -h / --help - example End-to-end example session - version Print version - -Main verbs: - init Initialize a new subu database (refuses if it exists) - create Create a minimal subu record (defaults only) - info | information Show details for a subu - WG WireGuard object operations - attach Attach a WG object to a subu (netns + cgroup/eBPF) - detach Detach WG from a subu - network Bring all attached ifaces up/down inside the subu netns - lo Bring loopback up/down inside the subu netns - option Persisted options (list/set/get for future policy) - exec Run a command inside the subu netns - -Tip: `subu help` (or `subu --help`) shows detailed help; `subu help WG` shows topic help. -++ ./subu.py usage -usage: subu [-V] [] - -Quick verbs: - usage Show this usage summary - help [topic] Detailed help; same as -h / --help - example End-to-end example session - version Print version - -Main verbs: - init Initialize a new subu database (refuses if it exists) - create Create a minimal subu record (defaults only) - info | information Show details for a subu - WG WireGuard object operations - attach Attach a WG object to a subu (netns + cgroup/eBPF) - detach Detach WG from a subu - network Bring all attached ifaces up/down inside the subu netns - lo Bring loopback up/down inside the subu netns - option Persisted options (list/set/get for future policy) - exec Run a command inside the subu netns - -Tip: `subu help` (or `subu --help`) shows detailed help; `subu help WG` shows topic help. -++ ./subu.py -h -subu — manage subu containers, namespaces, and WG attachments - -2.1 Core - - subu init - Create ./subu.db (tables: subu, wg, links, options, state). - Requires a 6-char token (e.g., dzkq7b). Refuses if DB already exists. - - subu create - Make a default subu with netns ns- containing lo only (down). - Returns subu_N. - - subu list - Columns: Subu_ID, Owner, Name, NetNS, WG_Attached?, Up/Down, Steer? 
- - subu info | subu information - Full record + attached WG(s) + options + iface states. - -2.2 Loopback - - subu lo up | subu lo down - Toggle loopback inside the subu’s netns. - -2.3 WireGuard objects (independent) - - subu WG global - e.g., 192.168.112.0/24; allocator hands out /32 peers sequentially. - Shows current base and next free on success. - - subu WG create - Creates WG object; allocates next /32 local IP; AllowedIPs=0.0.0.0/0. - Returns WG_M. - - subu WG server_provided_public_key - Stores server’s pubkey. - - subu WG info | subu WG information - Endpoint, allocated IP, pubkey set?, link state (admin/oper). - -2.4 Link WG ↔ subu, bring up/down - - subu attach WG - Creates/configures WG device inside ns-: - - device name: subu_ (M from WG_ID) - - set local /32, MTU 1420, accept_local=1 - - (no default route is added — steering uses eBPF) - - v1: enforce one WG per Subu; error if another attached - - subu detach WG - Remove WG device/config from the subu’s netns; keep WG object. - - subu WG up | subu WG down - Toggle interface admin state in the subu’s netns (must be attached). - - subu network up | subu network down - Only toggles admin state for all attached ifaces. On “up”, loopback - is brought up first automatically. No route manipulation. - -2.5 Execution & (future) steering - - subu exec -- … - Run a process inside the subu’s netns. - - subu steer enable | subu steer disable - (Future) Attach/detach eBPF cgroup programs to force SO_BINDTOIFINDEX=subu_ - for TCP/UDP. Default: disabled. - -2.6 Options (persist only, for future policy) - - subu option list - subu option get [name] - subu option set - -2.7 Meta - - subu usage - Short usage summary (also printed when no args are given). - - subu help [topic] - This help (or per-topic help such as `subu help WG`). - - subu example - A concrete end-to-end scenario. - - subu version - Print version (same as -V / --version). 
-++ ./subu.py --help -subu — manage subu containers, namespaces, and WG attachments - -2.1 Core - - subu init - Create ./subu.db (tables: subu, wg, links, options, state). - Requires a 6-char token (e.g., dzkq7b). Refuses if DB already exists. - - subu create - Make a default subu with netns ns- containing lo only (down). - Returns subu_N. - - subu list - Columns: Subu_ID, Owner, Name, NetNS, WG_Attached?, Up/Down, Steer? - - subu info | subu information - Full record + attached WG(s) + options + iface states. - -2.2 Loopback - - subu lo up | subu lo down - Toggle loopback inside the subu’s netns. - -2.3 WireGuard objects (independent) - - subu WG global - e.g., 192.168.112.0/24; allocator hands out /32 peers sequentially. - Shows current base and next free on success. - - subu WG create - Creates WG object; allocates next /32 local IP; AllowedIPs=0.0.0.0/0. - Returns WG_M. - - subu WG server_provided_public_key - Stores server’s pubkey. - - subu WG info | subu WG information - Endpoint, allocated IP, pubkey set?, link state (admin/oper). - -2.4 Link WG ↔ subu, bring up/down - - subu attach WG - Creates/configures WG device inside ns-: - - device name: subu_ (M from WG_ID) - - set local /32, MTU 1420, accept_local=1 - - (no default route is added — steering uses eBPF) - - v1: enforce one WG per Subu; error if another attached - - subu detach WG - Remove WG device/config from the subu’s netns; keep WG object. - - subu WG up | subu WG down - Toggle interface admin state in the subu’s netns (must be attached). - - subu network up | subu network down - Only toggles admin state for all attached ifaces. On “up”, loopback - is brought up first automatically. No route manipulation. - -2.5 Execution & (future) steering - - subu exec -- … - Run a process inside the subu’s netns. - - subu steer enable | subu steer disable - (Future) Attach/detach eBPF cgroup programs to force SO_BINDTOIFINDEX=subu_ - for TCP/UDP. Default: disabled. 
- -2.6 Options (persist only, for future policy) - - subu option list - subu option get [name] - subu option set - -2.7 Meta - - subu usage - Short usage summary (also printed when no args are given). - - subu help [topic] - This help (or per-topic help such as `subu help WG`). - - subu example - A concrete end-to-end scenario. - - subu version - Print version (same as -V / --version). -++ ./subu.py help -subu — manage subu containers, namespaces, and WG attachments - -2.1 Core - - subu init - Create ./subu.db (tables: subu, wg, links, options, state). - Requires a 6-char token (e.g., dzkq7b). Refuses if DB already exists. - - subu create - Make a default subu with netns ns- containing lo only (down). - Returns subu_N. - - subu list - Columns: Subu_ID, Owner, Name, NetNS, WG_Attached?, Up/Down, Steer? - - subu info | subu information - Full record + attached WG(s) + options + iface states. - -2.2 Loopback - - subu lo up | subu lo down - Toggle loopback inside the subu’s netns. - -2.3 WireGuard objects (independent) - - subu WG global - e.g., 192.168.112.0/24; allocator hands out /32 peers sequentially. - Shows current base and next free on success. - - subu WG create - Creates WG object; allocates next /32 local IP; AllowedIPs=0.0.0.0/0. - Returns WG_M. - - subu WG server_provided_public_key - Stores server’s pubkey. - - subu WG info | subu WG information - Endpoint, allocated IP, pubkey set?, link state (admin/oper). - -2.4 Link WG ↔ subu, bring up/down - - subu attach WG - Creates/configures WG device inside ns-: - - device name: subu_ (M from WG_ID) - - set local /32, MTU 1420, accept_local=1 - - (no default route is added — steering uses eBPF) - - v1: enforce one WG per Subu; error if another attached - - subu detach WG - Remove WG device/config from the subu’s netns; keep WG object. - - subu WG up | subu WG down - Toggle interface admin state in the subu’s netns (must be attached). 
- - subu network up | subu network down - Only toggles admin state for all attached ifaces. On “up”, loopback - is brought up first automatically. No route manipulation. - -2.5 Execution & (future) steering - - subu exec -- … - Run a process inside the subu’s netns. - - subu steer enable | subu steer disable - (Future) Attach/detach eBPF cgroup programs to force SO_BINDTOIFINDEX=subu_ - for TCP/UDP. Default: disabled. - -2.6 Options (persist only, for future policy) - - subu option list - subu option get [name] - subu option set - -2.7 Meta - - subu usage - Short usage summary (also printed when no args are given). - - subu help [topic] - This help (or per-topic help such as `subu help WG`). - - subu example - A concrete end-to-end scenario. - - subu version - Print version (same as -V / --version). -++ ./subu.py help WG -usage: subu WG [-h] - -options: - -h, --help show this help message and exit -++ ./subu.py example -# 0) Safe init (refuses if ./subu.db exists) -subu init dzkq7b -# -> created ./subu.db - -# 1) Create Subu -subu create Thomas US -# -> Subu_ID: subu_7 -# -> netns: ns-subu_7 with lo (down) - -# 2) Define WG pool (once per host) -subu WG global 192.168.112.0/24 -# -> base set; next free: 192.168.112.2/32 - -# 3) Create WG object with endpoint -subu WG create ReasoningTechnology.com:51820 -# -> WG_ID: WG_0 -# -> local IP: 192.168.112.2/32 -# -> AllowedIPs: 0.0.0.0/0 - -# 4) Add server public key -subu WG server_provided_public_key WG_0 ABCDEFG...xyz= -# -> saved - -# 5) Attach WG to Subu (device created/configured in ns) -subu attach WG subu_7 WG_0 -# -> device ns-subu_7/subu_0 configured (no default route) - -# 6) Bring network up (lo first, then attached ifaces) -subu network up subu_7 -# -> lo up; subu_0 admin up - -# 7) Start the WG engine inside the netns -subu WG up WG_0 -# -> up, handshakes should start - -# 8) Test from inside the subu -subu exec subu_7 -- curl -4v https://ifconfig.me -++ ./subu.py version -0.1.3 -++ ./subu.py -V -0.1.3 -++ 
set +x diff --git a/developer/source/manager/text.py b/developer/source/manager/text.py deleted file mode 100644 index 84f6762..0000000 --- a/developer/source/manager/text.py +++ /dev/null @@ -1,109 +0,0 @@ -# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*- -VERSION = "0.2.0" - -USAGE = """\ -subu — Subu manager (v0.2.0) - -Usage: - subu # usage - subu help # detailed help - subu example # example workflow - subu version # print version - - subu init - subu create - subu list - subu info | subu information - - subu lo up|down - - subu WG global - subu WG create - subu WG server_provided_public_key - subu WG info|information - subu WG up - subu WG down - - subu attach WG - subu detach WG - - subu network up|down - - subu option set - subu option get - subu option list - - subu exec -- ... -""" - -HELP = """\ -Subu manager (v0.2.0) - -1) Init - subu init - Creates ./subu.db. Refuses to run if db exists. - -2) Subu - subu create - subu list - subu info - -3) Loopback - subu lo up|down - -4) WireGuard objects (independent of subu) - subu WG global # e.g., 192.168.112.0/24 - subu WG create # allocates next /32 - subu WG server_provided_public_key - subu WG info - subu WG up / subu WG down # admin toggle after attached - -5) Attach/detach + eBPF steering - subu attach WG - - Creates WG dev as subu_ inside ns-subu_, assigns /32, MTU 1420 - - Installs per-subu cgroup + loads eBPF scaffold (UID check, metadata map) - - Keeps device admin-down until `subu network up` - subu detach WG - - Deletes device, removes cgroup + BPF - -6) Network aggregate - subu network up|down - - Ensures lo up on 'up', toggles attached WG ifaces - -7) Options - subu option set|get|list ... - -8) Exec - subu exec -- ... 
-""" - -EXAMPLE = """\ -# 0) Init -subu init dzkq7b - -# 1) Create Subu -subu create Thomas US -# -> subu_1 - -# 2) WG pool once -subu WG global 192.168.112.0/24 - -# 3) Create WG object with endpoint -subu WG create ReasoningTechnology.com:51820 -# -> WG_1 - -# 4) Pubkey (placeholder) -subu WG server_provided_public_key WG_1 ABCDEFG...xyz= - -# 5) Attach device and install cgroup+BPF steering -subu attach WG subu_1 WG_1 - -# 6) Bring network up (lo + WG) -subu network up subu_1 - -# 7) Test inside ns -subu exec subu_1 -- curl -4v https://ifconfig.me -""" - -def VERSION_string(): - return VERSION diff --git a/developer/source/manager/worker_bpf.py b/developer/source/manager/worker_bpf.py deleted file mode 100644 index 96aef14..0000000 --- a/developer/source/manager/worker_bpf.py +++ /dev/null @@ -1,78 +0,0 @@ -# -*- mode: python; coding: utf-8; python-indent-offset: 2; indent-tabs-mode: nil -*- -""" -worker_bpf.py — create per-subu cgroups and load eBPF (MVP) -Version: 0.2.0 -""" -import os, subprocess, json -from pathlib import Path - -class BpfError(RuntimeError): pass - -def run(cmd, check=True): - r = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - if check and r.returncode != 0: - raise BpfError(f"cmd failed: {' '.join(cmd)}\n{r.stderr}") - return r.stdout.strip() - -def ensure_mounts(): - # ensure bpf and cgroup v2 are mounted - try: - Path("/sys/fs/bpf").mkdir(parents=True, exist_ok=True) - run(["mount","-t","bpf","bpf","/sys/fs/bpf"], check=False) - except Exception: - pass - try: - Path("/sys/fs/cgroup").mkdir(parents=True, exist_ok=True) - run(["mount","-t","cgroup2","none","/sys/fs/cgroup"], check=False) - except Exception: - pass - -def cgroup_path(subu_id: str) -> str: - return f"/sys/fs/cgroup/{subu_id}" - -def install_steering(subu_id: str, netns: str, ifname: str): - ensure_mounts() - cg = Path(cgroup_path(subu_id)) - cg.mkdir(parents=True, exist_ok=True) - - # compile BPF - obj = Path("./bpf_force_egress.o") - src = 
Path("./bpf_force_egress.c") - if not src.exists(): - raise BpfError("bpf_force_egress.c missing next to manager") - - # Build object (requires clang/llc/bpftool) - run(["clang","-O2","-g","-target","bpf","-c",str(src),"-o",str(obj)]) - - # Load program into bpffs; attach to cgroup/inet4_connect + inet4_post_bind (MVP) - pinned = f"/sys/fs/bpf/{subu_id}_egress" - run(["bpftool","prog","loadall",str(obj),pinned], check=True) - - # Attach to hooks (MVP validation hooks) - # NOTE: these are safe no-ops for now; they validate UID and stash ifindex map. - for hook in ("cgroup/connect4","cgroup/post_bind4"): - run(["bpftool","cgroup","attach",cgroup_path(subu_id),"attach",hook,"pinned",f"{pinned}/prog_0"], check=False) - - # Write metadata for ifname (saved for future prog versions) - meta = {"ifname": ifname} - Path(f"/sys/fs/bpf/{subu_id}_meta.json").write_text(json.dumps(meta)) - -def remove_steering(subu_id: str): - cg = cgroup_path(subu_id) - # Detach whatever is attached - for hook in ("cgroup/connect4","cgroup/post_bind4"): - subprocess.run(["bpftool","cgroup","detach",cg,"detach",hook], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - # Remove pinned prog dir - pinned = Path(f"/sys/fs/bpf/{subu_id}_egress") - if pinned.exists(): - subprocess.run(["bpftool","prog","detach",str(pinned)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - try: - for p in pinned.glob("*"): p.unlink() - pinned.rmdir() - except Exception: - pass - # Remove cgroup dir - try: - Path(cg).rmdir() - except Exception: - pass diff --git a/developer/source/mount/README.org b/developer/source/mount/README.org deleted file mode 100644 index 73fd09f..0000000 --- a/developer/source/mount/README.org +++ /dev/null @@ -1,149 +0,0 @@ - -When the master users are not remote mounted, but rather have static home directories on the local machine, these scripts are not needed. 
- -masu == master user -subu == sub user - -These 'mount' scripts are for keeping master users on a remote, optionally encrypted, device. - -I have one example running, so there might be generalization issues with these scripts. - -The order and type of the command arguments are the first part of each -command name. The command description follows. For example: - -`map_name__mounted_masu_list.sh` - -is given a `map_name` and provides a list of the masu found at the mount point with that name. (Note, that /dev/mapper/ is mounted at /mnt/.) - - -For mounting a remote device. - -The remote device has at the top level master user home directories. Each master user home directory has a sub directory called subu_data. Each directory in subu_data is owned by a sub-user, and is the home directory for that sub-user. Each master user also has a `subu` sub-directory. The contents of the subu directory will parallel that of the subu_data directory, but differ in that the contained files will be owned by the master user. - -1. the remote device is mounted under /mnt/map_name - - `device_mapname__open_mount.sh` ` - - `mapname` is the name that appears under /dev/mapper, it is also used - as the /mnt/ mount point. - - if is not to be opened with cryptsetup, instead mount it to the mount pt directly. - -2. Each master user is mount --bind from the /mnt/ to /home - - masu__to_home.sh - - There is currently no script to map bind all the masu in one call. - -3. master users have subu home directories under /home//subu_data. These - are owned by the subu users. Map the /home//subu_data directories to - `/home/subu`n using - - masu__map_own_all.sh - - The `/home/subu` directories are bindfs mapped to be be owned by the master - user, so that the master user may administer them. - - - -* bindfs problem - - On my Fedora install once it was bound by root, it stayed there. 
On - Debian it keeps losing the mounts, for sure on a logout or login, - and maybe other times also --- the machinectl logins and logouts I - bet that is what is doing it. Hence very time I go to use a mount - in subu it is not there or filled with the dreaded question mark - garbage, has to be umount then remounted. - - -------------------------------------------------------------------------------- - You're exactly right in your diagnosis: Debian's behavior under - systemd with machinectl is unmounting user bind mounts on - login/logout cycles—especially when using enable-linger, systemd - --user, or machinectl shell. - - This differs from Fedora, which preserves those mounts more consistently unless explicitly torn down. Let’s first address why it’s happening, then provide a robust, recoverable fix. - 🔍 Why This Happens - - When machinectl or loginctl manages a user session: - - systemd creates a private mount namespace per user. - - Any mounts (especially bind mounts) not made within that - namespace or not managed by systemd itself (i.e. not via .mount - units) can be: - - Torn down on logout, - - Hidden due to namespace isolation, - - Disconnected if user lingering is toggled. - - If your bindfs mount is outside the user’s namespace (i.e. mounted by root), but then accessed inside a user-managed session, systemd may unmount it or lose visibility. - ✅ Recommended Fix - - There are two paths: - A. Use a systemd .mount unit (most stable) - - Create a file /etc/systemd/system/home-Thomas-subu-.mount: - - [Unit] - Description=Bind mount subu user home - - [Mount] - What=/home/Thomas/subu_data/ - Where=/home/Thomas/subu/ - Type=none - Options=bind - - [Install] - WantedBy=multi-user.target - - Enable and start it: - - sudo systemctl daemon-reexec - sudo systemctl enable --now home-Thomas-subu-.mount - - Repeat for each subu. - - This ensures systemd manages the bind mount in its native way and it won’t get torn down. - B. 
Remount automatically on login via ~/.bash_profile or a systemd --user unit - - You can modify your script to: - - Skip sudo if already root - - Be idempotent - - Automatically remount on session start - - Then in ~/.bash_profile or ~/.config/systemd/user/subu-mount.service: - - [Unit] - Description=Re-mount subu directory for Thomas - - [Service] - Type=oneshot - ExecStart=/home/Thomas/executable/subu/user_open_subu.sh Thomas US - - [Install] - WantedBy=default.target - - Enable with: - - systemctl --user daemon-reexec - systemctl --user enable --now subu-mount.service - - This guarantees it will come up as part of your login, even after a reboot. - 🔄 Temporary Workaround - - If you want a quick way to check all subu are mounted and remount them if not, write a watchdog script: - - #!/bin/bash - for subu in $(ls /home/Thomas/subu_data); do - mountpoint="/home/Thomas/subu/$subu" - if ! mountpoint -q "$mountpoint"; then - /home/Thomas/executable/subu/user_open_subu.sh Thomas "$subu" - fi - done - - Would you like me to auto-generate the .mount or .service files for each subu? We can script the system-wide setup cleanly. diff --git a/developer/source/mount/device_mapname__open_mount.sh b/developer/source/mount/device_mapname__open_mount.sh deleted file mode 100755 index b14ebe3..0000000 --- a/developer/source/mount/device_mapname__open_mount.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash - -if [[ $EUID -ne 0 ]]; then - echo "❌ This script must be run as root." >&2 - exit 1 -fi - -# Function to unlock and mount the device -device_mount() { - local device_node=$1 # e.g., /dev/sdb1 - local device_name=$2 # e.g., Zathustra - local mount_point="/mnt/$device_name" - - # Check if cryptsetup is installed - if ! command -v cryptsetup &> /dev/null; then - echo "Error: cryptsetup is not installed!" 
- return 1 - fi - - # Check if the device is already mounted - if mount | grep "on $mount_point" > /dev/null; then - echo "Device $device_name is already mounted at $mount_point." - return 0 - fi - - # Make sure the mount point exists - mkdir -p "$mount_point" - - # Unlock the encrypted device - sudo cryptsetup luksOpen "$device_node" "$device_name-crypt" - - # Mount the unlocked device - sudo mount "/dev/mapper/$device_name-crypt" "$mount_point" - - echo "$device_name mounted at $mount_point" -} - -# Run the function with the device node and device name as arguments -device_mount "$1" "$2" diff --git a/developer/source/mount/device_umount.sh b/developer/source/mount/device_umount.sh deleted file mode 100755 index 3eadc37..0000000 --- a/developer/source/mount/device_umount.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash - -if [[ $EUID -ne 0 ]]; then - echo "❌ This script must be run as root." >&2 - exit 1 -fi - -umount /mnt/"$1" -cryptsetup close "$1" diff --git a/developer/source/mount/disable_linger.sh b/developer/source/mount/disable_linger.sh deleted file mode 100755 index a9fffdd..0000000 --- a/developer/source/mount/disable_linger.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash -# disable_linger_subu — turn off systemd --user lingering for all -* users -# Usage: sudo disable_linger_subu --masu Thomas - -set -euo pipefail -MASU="" -while [[ $# -gt 0 ]]; do - case "$1" in - --masu) MASU="${2:-}"; shift 2;; - *) echo "unknown arg: $1" >&2; exit 2;; - esac -done -[[ -n "$MASU" ]] || { echo "usage: sudo $0 --masu "; exit 2; } -[[ $EUID -eq 0 ]] || { echo "must run as root"; exit 1; } - -mapfile -t SUBU_USERS < <(getent passwd | awk -F: -v pfx="^${MASU}-" '$1 ~ pfx {print $1}' | sort) -for u in "${SUBU_USERS[@]}"; do - echo "loginctl disable-linger $u" - loginctl disable-linger "$u" || true -done - -echo "Current linger files (should be empty or only intentional users):" -ls -1 /var/lib/systemd/linger 2>/dev/null || echo "(none)" -echo "✅ linger 
disabled for ${#SUBU_USERS[@]} users" diff --git a/developer/source/mount/logout_subu.sh b/developer/source/mount/logout_subu.sh deleted file mode 100755 index 9e95cb2..0000000 --- a/developer/source/mount/logout_subu.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env bash -# logout_subu — cleanly stop subu users, tear down bindfs, unbind /home, unmount device, close LUKS -# Usage: -# sudo logout_subu --masu Thomas --device Eagle [--aggressive] [--dry-run] -# -# Notes: -# - Run from a directory NOT under /home/ (we'll auto 'cd /' if needed). -# - --aggressive enables pkill -KILL fallback if user@ sessions don't exit. -# - --device is the mapname mounted at /mnt/ and /dev/mapper/-crypt. - -set -euo pipefail - -MASU="" -DEVICE="" -AGGR=0 -DRY=0 - -while [[ $# -gt 0 ]]; do - case "$1" in - --masu) MASU="${2:-}"; shift 2;; - --device) DEVICE="${2:-}"; shift 2;; - --aggressive) AGGR=1; shift;; - --dry-run) DRY=1; shift;; - -h|--help) - grep -E '^(# |#-)' "$0" | sed 's/^# \{0,1\}//' - exit 0;; - *) echo "unknown arg: $1" >&2; exit 2;; - esac -done - -if [[ -z "$MASU" ]]; then - # best guess: current sudo user or login user - MASU="${SUDO_USER:-${USER:-}}" - [[ -n "$MASU" ]] || { echo "Set --masu "; exit 2; } -fi - -if [[ $EUID -ne 0 ]]; then - echo "❌ must run as root (sudo)"; exit 1 -fi - -# If we’re under /home/, move away so unmount can succeed -if [[ "$(pwd -P)" == /home/${MASU}* ]]; then - echo "cd / (leaving $(pwd -P) so unmounts can proceed)" - [[ $DRY -eq 1 ]] || cd / -fi - -say() { printf '%s\n' "$*"; } -doit() { echo "+ $*"; [[ $DRY -eq 1 ]] || eval "$@"; } - -# --- enumerate subu users and mountpoints -SUBU_ROOT="/home/${MASU}/subu" -SUBU_DATA="/home/${MASU}/subu_data" - -# Users of the form MASU-something that actually exist -mapfile -t SUBU_USERS < <(getent passwd | awk -F: -v pfx="^${MASU}-" '$1 ~ pfx {print $1}' | sort) - -# Bindfs targets (reverse depth for unmount) -mapfile -t SUBU_MPS < <(findmnt -Rn -S fuse.* -T "$SUBU_ROOT" -o TARGET 2>/dev/null | \ - 
awk -F/ '{print NF, $0}' | sort -rn | cut -d" " -f2-) - -say "== stop subu systemd user managers ==" -for u in "${SUBU_USERS[@]}"; do - say "terminating user@ for $u" - doit loginctl terminate-user "$u" || true -done - -# wait a moment and optionally KILL leftovers -sleep 0.5 -for u in "${SUBU_USERS[@]}"; do - if loginctl list-users --no-legend | awk '{print $2}' | grep -qx "$u"; then - if [[ $AGGR -eq 1 ]]; then - uid="$(id -u "$u" 2>/dev/null || echo "")" - if [[ -n "$uid" ]]; then - say "aggressive kill of UID $uid ($u)" - doit pkill -KILL -u "$uid" || true - fi - else - say "⚠︎ $u still has a user@ manager; rerun with --aggressive to force-kill" - fi - fi -done - -say "== unmount bindfs subu mounts under $SUBU_ROOT ==" -for mp in "${SUBU_MPS[@]}"; do - say "umount $mp" - if [[ $DRY -eq 1 ]]; then - echo "+ umount '$mp'" - else - if ! umount "$mp" 2>/dev/null; then - echo " (busy) trying lazy umount" - umount -l "$mp" || true - fi - fi -done - -# Unmount the MASU home if it is a bind of /mnt//user_data/ -say "== unmount MASU home bind (if any) ==" -if findmnt -n -T "/home/${MASU}" >/dev/null 2>&1; then - src="$(findmnt -no SOURCE -T "/home/${MASU}")" - say "/home/${MASU} source: ${src}" - say "umount /home/${MASU}" - doit umount "/home/${MASU}" || true -fi - -# If a device mapname was provided, unmount and close it -if [[ -n "$DEVICE" ]]; then - say "== unmount /mnt/${DEVICE} and close LUKS ==" - if findmnt -n "/mnt/${DEVICE}" >/dev/null 2>&1; then - say "umount /mnt/${DEVICE}" - doit umount "/mnt/${DEVICE}" || true - fi - if cryptsetup status "${DEVICE}-crypt" >/dev/null 2>&1; then - say "cryptsetup close ${DEVICE}-crypt" - doit cryptsetup close "${DEVICE}-crypt" || true - else - say "crypt mapping ${DEVICE}-crypt not active" - fi -fi - -say "sync disks" -[[ $DRY -eq 1 ]] || sync -say "✅ done" diff --git a/developer/source/mount/mapname__mounted_masu_list.sh b/developer/source/mount/mapname__mounted_masu_list.sh deleted file mode 100755 index b955b07..0000000 
--- a/developer/source/mount/mapname__mounted_masu_list.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/usr/bin/env bash - -# Function to list users in the /mnt//user_data directory -device_user_list() { - local device=$1 - local user_data_dir="/mnt/$device/user_data" - - if [ ! -d "$user_data_dir" ]; then - echo "Error: $user_data_dir does not exist!" - return 1 - fi - - # List all user directories in the user_data directory - find "$user_data_dir" -maxdepth 1 -mindepth 1 -type d -exec basename {} \; -} - -# Run the function with the device name as an argument -device_user_list "$1" diff --git a/developer/source/mount/mapname_masu__to_home.sh b/developer/source/mount/mapname_masu__to_home.sh deleted file mode 100755 index b776626..0000000 --- a/developer/source/mount/mapname_masu__to_home.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -# Function to bind mount a user's data to /home/ -device_user_bind() { - local device=$1 - local user=$2 - local user_data_dir="/mnt/$device/user_data/$user" - local home_dir="/home/$user" - - if [ ! -d "$user_data_dir" ]; then - echo "Error: $user_data_dir does not exist!" 
- return 1 - fi - - # Create the home directory if it doesn't exist - mkdir -p "$home_dir" - - # Mount --bind the user data to the home directory - sudo mount --bind "$user_data_dir" "$home_dir" - echo "Mounted $user_data_dir -> $home_dir" -} - -# Run the function with the device name and user as arguments -device_user_bind "$1" "$2" diff --git a/developer/source/mount/masu__map_own_all.sh b/developer/source/mount/masu__map_own_all.sh deleted file mode 100755 index 0cdaa5b..0000000 --- a/developer/source/mount/masu__map_own_all.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash -# usage: sudo ./masu__map_own_all.sh [--suid=US,x6] -set -euo pipefail -masu="${1:?usage: $0 [--suid=a,b]}" -suid_list="${2-}" - -want_suid_for() { - [[ "$suid_list" =~ ^--suid= ]] || return 1 - IFS=',' read -r -a arr <<< "${suid_list#--suid=}" - for n in "${arr[@]}"; do [[ "$n" == "$1" ]] && return 0; done - return 1 -} - -subus="$(./masu__subu_data_dir_list.sh "$masu")" -[[ -n "$subus" ]] || { echo "No sub-users found for $masu"; exit 1; } - -while IFS= read -r s; do - [[ -n "$s" ]] || continue - echo "Opening sub-user: $s" - if want_suid_for "$s"; then - sudo ./masu_subu__map_own.sh "$masu" "$s" --suid - else - sudo ./masu_subu__map_own.sh "$masu" "$s" - fi -done <<< "$subus" diff --git a/developer/source/mount/masu__subu_data_dir_list.sh b/developer/source/mount/masu__subu_data_dir_list.sh deleted file mode 100755 index de4f9a0..0000000 --- a/developer/source/mount/masu__subu_data_dir_list.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash - -# Function to list sub-users in /home//subu_data -subu_home_dir_list() { - local user=$1 - local subu_home_dir="/home/$user/subu_data" - - if [ ! -d "/home/$user" ]; then - echo "Error: /home/$user does not exist!" - return 1 - fi - - if [ ! -d "$subu_home_dir" ]; then - echo "Error: $subu_home_dir does not exist!" 
- return 1 - fi - - # List all sub-users in the subu directory - find "$subu_home_dir" -maxdepth 1 -mindepth 1 -type d -exec basename {} \; -} - -# Run the function with the user as an argument -subu_home_dir_list "$1" diff --git a/developer/source/mount/masu__subu_dir_list.sh b/developer/source/mount/masu__subu_dir_list.sh deleted file mode 100755 index 383ea57..0000000 --- a/developer/source/mount/masu__subu_dir_list.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash -# masu__subu_dir_list.sh - -set -euo pipefail -user="${1:?usage: $0 }" - -# Prefer the /home//subu view; if empty/nonexistent, fall back to subu_data. -list_from_dir() { local d="$1"; [[ -d "$d" ]] && find "$d" -mindepth 1 -maxdepth 1 -type d -printf '%f\n' || true; } - -candidates="$( - list_from_dir "/home/$user/subu" - [[ -d "/home/$user/subu" && -n "$(ls -A /home/$user/subu 2>/dev/null || true)" ]] || list_from_dir "/home/$user/subu_data" -)" - -# Unique, stable order -printf '%s\n' "$candidates" | LC_ALL=C sort -u diff --git a/developer/source/mount/masu_subu__map_own.sh b/developer/source/mount/masu_subu__map_own.sh deleted file mode 100755 index 358f7fe..0000000 --- a/developer/source/mount/masu_subu__map_own.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bash -# usage: sudo ./masu_subu__map_own.sh [--suid] -set -euo pipefail - -masu="${1:?usage: $0 [--suid]}" -subu="${2:?usage: $0 [--suid]}" -want_suid=0; [[ "${3-}" == "--suid" ]] && want_suid=1 - -need(){ command -v "$1" >/dev/null 2>&1 || { echo "missing: $1" >&2; exit 1; }; } -need bindfs; need findmnt; need umount - -src="/home/$masu/subu_data/$subu" -mp="/home/$masu/subu/$subu" -[[ -d "$src" ]] || { echo "❌ source not found: $src" >&2; exit 1; } -mkdir -p "$mp" - -# mount options -base_opts="allow_other,default_permissions,exec" -opts="$base_opts,$([[ $want_suid -eq 1 ]] && echo suid || echo nosuid)" - -# fully unstack any prior bindfs at the target -while findmnt -rn -T "$mp" -t fuse.bindfs >/dev/null 2>&1; do - umount "$mp" 
2>/dev/null || umount -l "$mp" || break - sleep 0.1 -done - -echo "mounting $src -> $mp (opts: $opts)" -bindfs -o "$opts" --map="${masu}-${subu}/${masu}:@${masu}-${subu}/@${masu}" "$src" "$mp" - -# verify (single line, kernel-only) -findmnt -rn -T "$mp" -S "$src" -o TARGET,SOURCE,FSTYPE,OPTIONS | head -n1 -echo "OK" -if [[ $want_suid -eq 1 ]]; then - echo "note: suid enabled at $mp" -else - echo "note: nosuid (default) — setuid will NOT take effect at $mp" -fi diff --git a/developer/source/mount/masu_subu__map_own_orig.sh b/developer/source/mount/masu_subu__map_own_orig.sh deleted file mode 100644 index 2a1b1f5..0000000 --- a/developer/source/mount/masu_subu__map_own_orig.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env bash - -# Function to bind mount with UID/GID mapping -subu_bind() { - local user=$1 - local subu=$2 - - # Check if bindfs is installed - if ! command -v bindfs &> /dev/null; then - echo "Error: bindfs is not installed!" - return 1 - fi - - # Get the username and group name for the main user - master_user_name=$user - master_group=$user - - # Get the username and group name for the sub-user - subu_user_name="${user}-${subu}" - subu_group="${user}-${subu}" - - # Check if the user and sub-user exist - if ! id "$master_user_name" &>/dev/null; then - echo "Error: User '$master_user_name' not found!" - return 1 - fi - if ! id "$subu_user_name" &>/dev/null; then - echo "Error: Sub-user '${master_user_name}-${subu}' not found!" - return 1 - fi - - # Directories to be bind-mounted - subu_data_path="/home/$user/subu_data/$subu" - subu_mount_point_path="/home/$user/subu/$subu" - - # Check if sub-user directory exists - if [ ! -d "$subu_data_path" ]; then - echo "Error: Sub-user directory '$subu_data_path' does not exist!" 
- return 1 - fi - - # Create the mount point if it doesn't exist - mkdir -p "$subu_mount_point_path" - - # Perform the bind mount using bindfs with UID/GID mapping - sudo bindfs\ - --map="$subu_user_name/$master_user_name:@$subu_group/@$master_group" \ - "$subu_data_path" \ - "$subu_mount_point_path" - - # Verify if the mount was successful - if [ $? -eq 0 ]; then - echo "Successfully bind-mounted $subu_data_path to $subu_mount_point_path with UID/GID mapping." - else - echo "Error: Failed to bind-mount $subu_data_path to $subu_mount_point_path, might already exist." - fi -} - -# Call the function with user and subu as arguments -subu_bind "$1" "$2" diff --git a/developer/source/mount/masu_subu__uid.sh b/developer/source/mount/masu_subu__uid.sh deleted file mode 100755 index d3a976e..0000000 --- a/developer/source/mount/masu_subu__uid.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash - -# Function to lookup the UID of the user and sub-user combination -get_subu_uid() { - local user=$1 - local subu=$2 - - # Concatenate user and sub-user name (no space around the = sign) - local subu_user="${user}-${subu}" - - # Lookup the UID for the sub-user (user-subuser) combination - subu_uid=$(id -u "$subu_user" 2>/dev/null) - - # If found, return only the UID, otherwise return nothing - if [ -n "$subu_uid" ]; then - echo "$subu_uid" - fi -} - -# Call the function with user and subu as arguments -get_subu_uid "$1" "$2" diff --git a/developer/source/mount/mount_pt_list.sh b/developer/source/mount/mount_pt_list.sh deleted file mode 100755 index 588958d..0000000 --- a/developer/source/mount/mount_pt_list.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash - -# Function to list available devices under /mnt, excluding /mnt itself -mount_pt_list() { - # List all directories in /mnt that are potentially available for mounting - find /mnt -mindepth 1 -maxdepth 1 -type d -exec basename {} \; -} - -# Call the function to display available devices -mount_pt_list diff --git 
a/developer/source/tunnel-client/.gitignore b/developer/source/tunnel-client/.gitignore deleted file mode 100644 index 5c016c6..0000000 --- a/developer/source/tunnel-client/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ - -__pycache__ - diff --git a/developer/source/tunnel-client/db/.gitignore b/developer/source/tunnel-client/db/.gitignore deleted file mode 100644 index 53642ce..0000000 --- a/developer/source/tunnel-client/db/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ - -* -!.gitignore - diff --git a/developer/source/tunnel-client/db_bind_user_to_iface.py b/developer/source/tunnel-client/db_bind_user_to_iface.py deleted file mode 100755 index 1ec4700..0000000 --- a/developer/source/tunnel-client/db_bind_user_to_iface.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python3 -# db_bind_user_to_iface.py — bind ONE linux user to ONE interface in the DB (no schema writes) -# Usage: ./db_bind_user_to_iface.py # e.g. ./db_bind_user_to_iface.py Thomas-x6 x6 - -from __future__ import annotations -import sys, sqlite3, pwd -from pathlib import Path -from typing import Optional -import incommon as ic # ROOT_DIR/DB_PATH, open_db() - -def system_uid_or_none(username: str) -> Optional[int]: - """Return the system UID for username, or None if the user doesn't exist locally.""" - try: - return pwd.getpwnam(username).pw_uid - except KeyError: - return None - -def bind_user_to_iface(conn: sqlite3.Connection, iface: str, username: str) -> str: - """ - Given (iface, username): - - Look up client.id by iface (table: client) - - Upsert into User(iface_id, username, uid) - - Update uid based on local /etc/passwd (None if user not found) - Returns a concise status string. - """ - row = conn.execute("SELECT id FROM Iface WHERE iface=? 
LIMIT 1;", (iface,)).fetchone() - if not row: - raise RuntimeError(f"Interface '{iface}' not found in client") - - iface_id = int(row[0]) - uid_val = system_uid_or_none(username) - - # Upsert binding - conn.execute(""" - INSERT INTO User (iface_id, username, uid, created_at, updated_at) - VALUES (?, ?, ?, strftime('%Y-%m-%dT%H:%M:%SZ','now'), strftime('%Y-%m-%dT%H:%M:%SZ','now')) - ON CONFLICT(iface_id, username) DO UPDATE SET - uid = excluded.uid, - updated_at = strftime('%Y-%m-%dT%H:%M:%SZ','now'); - """, (iface_id, username, uid_val)) - - if uid_val is None: - return f"bound {username} → {iface} (uid=NULL; user not present on this system)" - return f"bound {username} → {iface} (uid={uid_val})" - -def main(argv: list[str]) -> int: - if len(argv) != 2: - prog = Path(sys.argv[0]).name - print(f"Usage: {prog} ", file=sys.stderr) - return 2 - - username, iface = argv - try: - with ic.open_db() as conn: - msg = bind_user_to_iface(conn, iface, username) - conn.commit() - except FileNotFoundError as e: - print(f"❌ {e}", file=sys.stderr); return 1 - except sqlite3.Error as e: - print(f"❌ sqlite error: {e}", file=sys.stderr); return 1 - except RuntimeError as e: - print(f"❌ {e}", file=sys.stderr); return 1 - - print(f"✔ {msg}") - return 0 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/db_checks.py b/developer/source/tunnel-client/db_checks.py deleted file mode 100755 index ef172de..0000000 --- a/developer/source/tunnel-client/db_checks.py +++ /dev/null @@ -1,84 +0,0 @@ -#!/usr/bin/env python3 -# db_checks.py — quick audit for common misconfigurations - -from __future__ import annotations -import sys, sqlite3, ipaddress -import incommon as ic - -def audit(conn: sqlite3.Connection) -> int: - errs = 0 - - # 1) client present? 
- C = ic.rows(conn, """ - SELECT id, iface, local_address_cidr, rt_table_name_eff - FROM v_client_effective - ORDER BY iface; - """) - if not C: - print("WARN: no client present"); return 1 - - # 2) CIDR sanity - for cid, iface, cidr, rtname in C: - try: - ipaddress.IPv4Interface(cidr) - except Exception as e: - print(f"ERR: client {iface} has invalid CIDR {cidr}: {e}") - errs += 1 - - # 3) server exist and map to client - S = ic.rows(conn, """ - SELECT s.id, c.iface, s.name, s.public_key, s.endpoint_host, s.endpoint_port, s.allowed_ips - FROM server s - JOIN Iface c ON c.id = s.iface_id - ORDER BY c.iface, s.name; - """) - if not S: - print("WARN: no server present for any client") - - # 4) user bindings exist? (not required, but useful) - UB = ic.rows(conn, """ - SELECT c.iface, ub.username, ub.uid - FROM User ub - JOIN Iface c ON c.id = ub.iface_id - ORDER BY c.iface, ub.username; - """) - if not UB: - print("WARN: no User present") - - # 5) duplicate tunnel IPs across client (/32 equality) - tunnel_hosts = {} - for _, iface, cidr, _ in C: - try: - host = str(ipaddress.IPv4Interface(cidr).ip) - if host in tunnel_hosts and tunnel_hosts[host] != iface: - print(f"ERR: duplicate tunnel host {host} on {tunnel_hosts[host]} and {iface}") - errs += 1 - else: - tunnel_hosts[host] = iface - except Exception: - pass - - # 6) Server AllowedIPs hygiene: warn when 0.0.0.0/0 appears in server table - for sid, iface, sname, pub, host, port, allow in S: - if allow.strip() == "0.0.0.0/0": - # client-side full-tunnel is fine; server-side peer should use /32 entries - print(f"NOTE: server(name={sname}, client={iface}) has AllowedIPs=0.0.0.0/0 (client-side full-tunnel). Ensure server peer uses /32(s).") - - # 7) meta.subu_cidr present? 
- M = dict(ic.rows(conn, "SELECT key, value FROM meta;")) - if "subu_cidr" not in M: - print("WARN: meta.subu_cidr missing; default tooling may assume 10.0.0.0/24") - - print("OK: audit complete" if errs == 0 else f"FAIL: {errs} error(s)") - return 1 if errs else 0 - -def main(argv: list[str]) -> int: - try: - with ic.open_db() as conn: - return audit(conn) - except (sqlite3.Error, FileNotFoundError) as e: - print(f"❌ {e}", file=sys.stderr) - return 2 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/db_init_StanleyPark.py b/developer/source/tunnel-client/db_init_StanleyPark.py deleted file mode 100755 index a031a45..0000000 --- a/developer/source/tunnel-client/db_init_StanleyPark.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python3 -# db_init_StanleyPark.py — initialize the DB for the StanleyPark client - -from __future__ import annotations -import sys, subprocess, sqlite3 -from pathlib import Path -import incommon as ic - -# Use existing business functions (no duplication) -from db_init_iface_x6 import init_iface_x6 -from db_init_iface_US import init_iface_US -from db_init_server_x6 import init_server_x6 -from db_init_server_US import init_server_US -from db_bind_user_to_iface import bind_user_to_iface -from db_init_ip_table_registration import assign_missing_rt_table_ids -from db_init_ip_iface_addr_assign import reconcile_kernel_and_db_ipv4_addresses -from db_init_route_defaults import seed_default_routes - -ROOT = Path(__file__).resolve().parent -DB = ic.DB_PATH - -def msg_wrapped_call(title: str, fn=None, *args, **kwargs): - """Print a before/after status line around calling `fn(*args, **kwargs)`. 
- Returns the function’s return value.""" - print(f"→ {title}", flush=True) - res = fn(*args, **kwargs) if fn else None - print(f"✔ {title}" + (f": {res}" if res not in (None, "") else ""), flush=True) - return res - -def _run_local(script: str, *argv: str): - subprocess.run([str(ROOT / script), *argv], check=True) - -def db_init_StanleyPark() -> int: - """ - Given the local SQLite DB at ic.DB_PATH, this: - 1) loads schema - 2) upserts ifaces (x6, US) - 3) upserts servers (x6, US) - 4) binds users (Thomas-x6→x6, Thomas-US→US) - 5) seeds per-iface default routes into Route - 6) assigns missing rt_table_id values from /etc/iproute2/rt_tables - 7) reconciles/assigns interface IPv4 addresses (kernel→DB, then pool) - 8) commits and prints status - Returns 0 on success (raises on failure). - """ - # 1) Schema - msg_wrapped_call("db_schema_load.sh", _run_local, "db_schema_load.sh") - - # 2) DB work in one connection/commit - with ic.open_db(DB) as conn: - # ifaces + servers + user bindings - msg_wrapped_call("db_init_iface_x6.py (init_iface_x6)", init_iface_x6, conn) - msg_wrapped_call("db_init_server_x6.py (init_server_x6)", init_server_x6, conn) - msg_wrapped_call("bind_user_to_iface: Thomas-x6 → x6", bind_user_to_iface, conn, "x6", "Thomas-x6") - - msg_wrapped_call("db_init_iface_US.py (init_iface_US)", init_iface_US, conn) - msg_wrapped_call("db_init_server_US.py (init_server_US)", init_server_US, conn) - msg_wrapped_call("bind_user_to_iface: Thomas-US → US", bind_user_to_iface, conn, "US", "Thomas-US") - - # 5) seed default routes for the selected ifaces (no duplicates; idempotent) - msg_wrapped_call( - "db_init_route_defaults (x6,US)", - lambda: seed_default_routes(conn, iface_names=["x6","US"], overwrite=False) - ) - - # 6) assign rt_table_id from system tables (DB-only; no file writes) - msg_wrapped_call( - "db_init_ip_table_registration", - lambda: assign_missing_rt_table_ids(conn, low=20000, high=29999, dry_run=False) - ) - - # 7) reconcile/assign interface IPv4 
addresses (kernel → DB; pool for missing) - msg_wrapped_call( - "db_init_ip_iface_addr_assign", - lambda: reconcile_kernel_and_db_ipv4_addresses( - conn, - pool_cidr="10.0.0.0/16", - assign_prefix=32, - reserve_first=0, - dry_run=False - ) - ) - - # 8) commit - conn.commit() - print("✔ commit: database updated") - - return 0 - -def main(argv): - if argv: - print(f"Usage: {Path(sys.argv[0]).name}", file=sys.stderr) - return 2 - try: - return db_init_StanleyPark() - except (subprocess.CalledProcessError, sqlite3.Error, FileNotFoundError) as e: - print(f"❌ {e}", file=sys.stderr) - return 1 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/db_init_iface.py b/developer/source/tunnel-client/db_init_iface.py deleted file mode 100644 index 1f9443e..0000000 --- a/developer/source/tunnel-client/db_init_iface.py +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env python3 -# Helpers to seed/update a row in client. - -from __future__ import annotations -import sqlite3 -from typing import Any, Optional, Dict -import incommon as ic # provides DB_PATH, open_db - -# Normally don't set the addr_cidr, the system will automically -# assign a free address, or reuse one that is already set. - -def upsert_client(conn: sqlite3.Connection, - *, - iface: str, - addr_cidr: Optional[str] = None, - rt_table_name: Optional[str] = None, - rt_table_id: Optional[int] = None, - mtu: Optional[int] = None, - fwmark: Optional[int] = None, - dns_mode: Optional[str] = None, # 'none' or 'static' - dns_servers: Optional[str] = None, - autostart: Optional[int] = None, # 0 or 1 - bound_user: Optional[str] = None, - bound_uid: Optional[int] = None - ) -> str: - row = conn.execute( - """SELECT id, iface, rt_table_id, rt_table_name, local_address_cidr, - mtu, fwmark, dns_mode, dns_servers, autostart, - bound_user, bound_uid - FROM Iface WHERE iface=? 
LIMIT 1;""", - (iface,) - ).fetchone() - - defname = rt_table_name if rt_table_name is not None else iface - desired: Dict[str, Any] = {"iface": iface, "local_address_cidr": addr_cidr} - if rt_table_id is not None: desired["rt_table_id"] = rt_table_id - if rt_table_name is not None: desired["rt_table_name"] = rt_table_name - if mtu is not None: desired["mtu"] = mtu - if fwmark is not None: desired["fwmark"] = fwmark - if dns_mode is not None: desired["dns_mode"] = dns_mode - if dns_servers is not None: desired["dns_servers"] = dns_servers - if autostart is not None: desired["autostart"] = autostart - if bound_user is not None: desired["bound_user"] = bound_user - if bound_uid is not None: desired["bound_uid"] = bound_uid - - if row is None: - fields = ["iface","local_address_cidr","rt_table_name"] - vals = [iface, addr_cidr, defname] - for k in ("rt_table_id","mtu","fwmark","dns_mode","dns_servers","autostart","bound_user","bound_uid"): - if k in desired: fields.append(k); vals.append(desired[k]) - q = f"INSERT INTO Iface ({','.join(fields)}) VALUES ({','.join('?' for _ in vals)});" - cur = conn.execute(q, vals); conn.commit() - return f"seeded: client(iface={iface}) id={cur.lastrowid} addr={addr_cidr} rt={defname}" - else: - cid, _, rt_id, rt_name, cur_addr, cur_mtu, cur_fwm, cur_dns_mode, cur_dns_srv, cur_auto, cur_buser, cur_buid = row - current = { - "local_address_cidr": cur_addr, "rt_table_id": rt_id, "rt_table_name": rt_name, - "mtu": cur_mtu, "fwmark": cur_fwm, "dns_mode": cur_dns_mode, "dns_servers": cur_dns_srv, - "autostart": cur_auto, "bound_user": cur_buser, "bound_uid": cur_buid - } - changes: Dict[str, Any] = {} - for k, v in desired.items(): - if k == "iface": continue - if current.get(k) != v: changes[k] = v - if rt_name is None and "rt_table_name" not in changes: - changes["rt_table_name"] = defname - if not changes: - return f"ok: client(iface={iface}) unchanged id={cid} addr={cur_addr} rt={rt_name or defname}" - sets = ", ".join(f"{k}=?" 
for k in changes) - vals = list(changes.values()) + [iface] - conn.execute(f"UPDATE Iface SET {sets} WHERE iface=?;", vals); conn.commit() - return f"updated: client(iface={iface}) id={cid} " + " ".join(f"{k}={changes[k]}" for k in changes) diff --git a/developer/source/tunnel-client/db_init_iface_US.py b/developer/source/tunnel-client/db_init_iface_US.py deleted file mode 100755 index bf03c95..0000000 --- a/developer/source/tunnel-client/db_init_iface_US.py +++ /dev/null @@ -1,6 +0,0 @@ -# db_init_iface_US.py -from db_init_iface import upsert_client - -def init_iface_US(conn): - # iface US with dedicated table 'US' and a distinct host /32 - return upsert_client(conn, iface="US", rt_table_name="US") diff --git a/developer/source/tunnel-client/db_init_iface_x6.py b/developer/source/tunnel-client/db_init_iface_x6.py deleted file mode 100755 index 82eb5fe..0000000 --- a/developer/source/tunnel-client/db_init_iface_x6.py +++ /dev/null @@ -1,6 +0,0 @@ -# db_init_iface_x6.py -from db_init_iface import upsert_client - -def init_iface_x6(conn): - # iface x6 with dedicated table 'x6' and host /32 - return upsert_client(conn, iface="x6", rt_table_name="x6") diff --git a/developer/source/tunnel-client/db_init_ip_iface_addr_assign.py b/developer/source/tunnel-client/db_init_ip_iface_addr_assign.py deleted file mode 100755 index 561635e..0000000 --- a/developer/source/tunnel-client/db_init_ip_iface_addr_assign.py +++ /dev/null @@ -1,224 +0,0 @@ -#!/usr/bin/env python3 -""" -db_init_ip_iface_addr_assign.py - -Business API: - reconcile_kernel_and_db_ipv4_addresses(conn ,pool_cidr="10.0.0.0/16" ,assign_prefix=32 ,reserve_first=0 ,dry_run=False) - -> (updated_count ,notes) -""" - -from __future__ import annotations -import argparse -import ipaddress -import json -import sqlite3 -import subprocess -from typing import Dict ,Iterable ,List ,Optional ,Sequence ,Tuple - -import incommon as ic - - -def fetch_ifaces(conn: sqlite3.Connection) -> List[Tuple[int ,str ,Optional[str]]]: - sql 
= """ - SELECT id, - iface, - NULLIF(TRIM(local_address_cidr),'') AS local_address_cidr - FROM Iface - ORDER BY id; - """ - cur = conn.execute(sql) - rows = cur.fetchall() - return [ - (int(r[0]) ,str(r[1]) ,(str(r[2]) if r[2] is not None else None)) - for r in rows - ] - - -def update_iface_addresses(conn: sqlite3.Connection ,updates: Dict[int ,str]) -> int: - if not updates: - return 0 - with conn: - for iface_id ,cidr in updates.items(): - conn.execute("UPDATE Iface SET local_address_cidr=? WHERE id=?" ,(cidr ,iface_id)) - return len(updates) - - -def kernel_ipv4_cidr_for(iface: str) -> Optional[str]: - try: - cp = subprocess.run( - ["ip","-j","addr","show","dev",iface] - ,check=False - ,capture_output=True - ,text=True - ) - except Exception: - return None - if cp.returncode != 0 or not cp.stdout.strip(): - return None - try: - data = json.loads(cp.stdout) - except json.JSONDecodeError: - return None - if not isinstance(data ,list) or not data: - return None - addr_info = data[0].get("addr_info") or [] - for a in addr_info: - if a.get("family") == "inet" and a.get("scope") == "global": - local = a.get("local"); plen = a.get("prefixlen") - if local and isinstance(plen ,int): - return f"{local}/{plen}" - for a in addr_info: - if a.get("family") == "inet": - local = a.get("local"); plen = a.get("prefixlen") - if local and isinstance(plen ,int): - return f"{local}/{plen}" - return None - - -def kernel_ipv4_map(ifaces: Sequence[str]) -> Dict[str ,Optional[str]]: - return {name: kernel_ipv4_cidr_for(name) for name in ifaces} - - -def _host_ip_from_cidr(cidr: str): - try: - ipi = ipaddress.ip_interface(cidr) - except ValueError: - return None - if isinstance(ipi.ip ,ipaddress.IPv4Address): - return ipaddress.IPv4Address(int(ipi.ip)) - return None - - -def _collect_used_hosts_from(cidrs: Iterable[str] ,pool: ipaddress.IPv4Network) -> List[ipaddress.IPv4Address]: - used: List[ipaddress.IPv4Address] = [] - for c in cidrs: - hip = _host_ip_from_cidr(c) - if hip is not 
None and hip in pool: - used.append(hip) - return used - - -def _first_free_hosts( - count: int - ,used_hosts: Iterable[ipaddress.IPv4Address] - ,pool: ipaddress.IPv4Network - ,reserve_first: int = 0 -) -> List[ipaddress.IPv4Address]: - used_set = {int(h) for h in used_hosts} - result: List[ipaddress.IPv4Address] = [] - start = int(pool.network_address) + 1 + max(0 ,reserve_first) - end = int(pool.broadcast_address) - 1 - for val in range(start ,end+1): - if val not in used_set: - result.append(ipaddress.IPv4Address(val)) - if len(result) >= count: - break - if len(result) < count: - raise RuntimeError(f"address pool exhausted in {pool} (needed {count} more)") - return result - - -def plan_address_updates( - rows: Sequence[Tuple[int ,str ,Optional[str]]] - ,pool_cidr: str - ,assign_prefix: int - ,reserve_first: int - ,kmap: Dict[str ,Optional[str]] -) -> Tuple[Dict[int ,str] ,List[str]]: - notes: List[str] = [] - pool = ipaddress.IPv4Network(pool_cidr ,strict=False) - if pool.version != 4: - raise ValueError("only IPv4 pools supported") - - kernel_present = [c for c in kmap.values() if c] - db_present = [c for (_i ,_n ,c) in rows if c] - used_hosts = ( - _collect_used_hosts_from(kernel_present ,pool) - + _collect_used_hosts_from(db_present ,pool) - ) - - alloc_targets: List[Tuple[int ,str]] = [] - updates: Dict[int ,str] = {} - - for iface_id ,iface_name ,db_cidr in rows: - k_cidr = kmap.get(iface_name) - - if k_cidr: - if db_cidr != k_cidr: - updates[iface_id] = k_cidr - if db_cidr: - notes.append(f"sync: iface '{iface_name}' DB {db_cidr} -> kernel {k_cidr}") - else: - notes.append(f"sync: iface '{iface_name}' set from kernel {k_cidr}") - continue - - if db_cidr: - notes.append(f"note: iface '{iface_name}' has DB {db_cidr} but no kernel IPv4") - continue - - alloc_targets.append((iface_id ,iface_name)) - - if alloc_targets: - free = _first_free_hosts(len(alloc_targets) ,used_hosts ,pool ,reserve_first=reserve_first) - for idx ,(iface_id ,iface_name) in 
enumerate(alloc_targets): - cidr = f"{free[idx]}/{assign_prefix}" - updates[iface_id] = cidr - notes.append(f"assign: iface '{iface_name}' -> {cidr} (from pool {pool_cidr})") - - return (updates ,notes) - - -def reconcile_kernel_and_db_ipv4_addresses( - conn: sqlite3.Connection - ,pool_cidr: str = "10.0.0.0/16" - ,assign_prefix: int = 32 - ,reserve_first: int = 0 - ,dry_run: bool = False -) -> Tuple[int ,List[str]]: - rows = fetch_ifaces(conn) - iface_names = [n for (_i ,n ,_c) in rows] - kmap = kernel_ipv4_map(iface_names) - - updates ,notes = plan_address_updates( - rows - ,pool_cidr - ,assign_prefix - ,reserve_first - ,kmap - ) - if not updates: - return (0 ,notes or ["noop: nothing to change"]) - if dry_run: - return (0 ,notes) - - updated = update_iface_addresses(conn ,updates) - return (updated ,notes) - - -# --- thin CLI --- - -def main(argv=None) -> int: - ap = argparse.ArgumentParser() - ap.add_argument("--pool" ,type=str ,default="10.0.0.0/16") - ap.add_argument("--assign-prefix" ,type=int ,default=32) - ap.add_argument("--reserve-first" ,type=int ,default=0) - ap.add_argument("--dry-run" ,action="store_true") - args = ap.parse_args(argv) - with ic.open_db() as conn: - updated ,notes = reconcile_kernel_and_db_ipv4_addresses( - conn - ,pool_cidr=args.pool - ,assign_prefix=args.assign_prefix - ,reserve_first=args.reserve_first - ,dry_run=args.dry_run - ) - if notes: - print("\n".join(notes)) - if not args.dry_run: - print(f"updated rows: {updated}") - return 0 - - -if __name__ == "__main__": - import sys - sys.exit(main()) diff --git a/developer/source/tunnel-client/db_init_ip_table_registration.py b/developer/source/tunnel-client/db_init_ip_table_registration.py deleted file mode 100755 index 8436a2d..0000000 --- a/developer/source/tunnel-client/db_init_ip_table_registration.py +++ /dev/null @@ -1,159 +0,0 @@ -#!/usr/bin/env python3 -""" -db_init_ip_table_registration.py - -Business API: - assign_missing_rt_table_ids(conn ,low=20000 ,high=29999 
,dry_run=False) - -> (updated_count ,planned_map ,notes) - -Policy: -- Effective table name per iface is COALESCE(rt_table_name ,iface). -- If that name exists in /etc/iproute2/rt_tables, reuse its number. -- Else allocate first free number in [low ,high]. -- Writes DB only. Does NOT write rt_tables. -""" - -from __future__ import annotations -import argparse -import sqlite3 -from pathlib import Path -from typing import Dict ,Iterable ,List ,Optional ,Sequence ,Tuple - -import incommon as ic # for CLI path only - -RT_TABLES_PATH = Path("/etc/iproute2/rt_tables") - - -def parse_rt_tables(path: Path) -> Tuple[List[str] ,Dict[str ,int] ,Dict[int ,str]]: - text = path.read_text() if path.exists() else "" - lines = text.splitlines() - name_to_num: Dict[str ,int] = {} - num_to_name: Dict[int ,str] = {} - for ln in lines: - s = ln.strip() - if not s or s.startswith("#"): - continue - parts = s.split() - if len(parts) >= 2 and parts[0].isdigit(): - n = int(parts[0]); name = parts[1] - if name not in name_to_num and n not in num_to_name: - name_to_num[name] = n - num_to_name[n] = name - return (lines ,name_to_num ,num_to_name) - - -def first_free_id(used: Iterable[int] ,low: int ,high: int) -> int: - used_set = set(u for u in used if low <= u <= high) - for n in range(low ,high+1): - if n not in used_set: - return n - raise RuntimeError(f"no free routing-table IDs in [{low},{high}]") - - -def fetch_effective_ifaces(conn: sqlite3.Connection) -> List[Tuple[int ,str ,Optional[int]]]: - sql = """ - SELECT i.id, - COALESCE(i.rt_table_name, i.iface) AS eff_name, - i.rt_table_id - FROM Iface i - ORDER BY i.id; - """ - cur = conn.execute(sql) - rows = cur.fetchall() - return [ - (int(r[0]) ,str(r[1]) ,(int(r[2]) if r[2] is not None else None)) - for r in rows - ] - - -def update_rt_ids(conn: sqlite3.Connection ,updates: Dict[int ,int]) -> int: - if not updates: - return 0 - with conn: - for iface_id ,rt_id in updates.items(): - conn.execute("UPDATE Iface SET rt_table_id=? 
WHERE id=?" ,(rt_id ,iface_id)) - return len(updates) - - -def plan_rt_id_assignments( - ifaces: Sequence[Tuple[int ,str ,Optional[int]]] - ,name_to_num_sys: Dict[str ,int] - ,existing_ids_in_db: Iterable[int] - ,low: int - ,high: int -) -> Dict[int ,int]: - used_numbers = set(int(x) for x in existing_ids_in_db) | set(name_to_num_sys.values()) - planned: Dict[int ,int] = {} - - names_seen: Dict[str ,int] = {} - for iface_id ,eff_name ,_ in ifaces: - if eff_name in names_seen and names_seen[eff_name] != iface_id: - raise RuntimeError( - f"duplicate effective table name in DB: '{eff_name}' used by Iface.id {names_seen[eff_name]} and {iface_id}" - ) - names_seen[eff_name] = iface_id - - for iface_id ,eff_name ,current_id in ifaces: - if current_id is not None: - used_numbers.add(int(current_id)) - continue - if eff_name in name_to_num_sys: - rt_id = int(name_to_num_sys[eff_name]) - else: - rt_id = first_free_id(used_numbers ,low ,high) - planned[iface_id] = rt_id - used_numbers.add(rt_id) - - return planned - - -def assign_missing_rt_table_ids( - conn: sqlite3.Connection - ,low: int = 20000 - ,high: int = 29999 - ,dry_run: bool = False -) -> Tuple[int ,Dict[int ,int] ,List[str]]: - _ ,name_to_num_sys ,_ = parse_rt_tables(RT_TABLES_PATH) - notes: List[str] = [] - - rows = fetch_effective_ifaces(conn) - existing_ids = [r[2] for r in rows if r[2] is not None] - planned = plan_rt_id_assignments(rows ,name_to_num_sys ,existing_ids ,low ,high) - - if not planned: - return (0 ,{} ,["noop: all Iface.rt_table_id already set"]) - - for iface_id ,eff_name ,current in rows: - if iface_id in planned: - notes.append(f"Iface.id={iface_id} name='{eff_name}' rt_table_id: {current} -> {planned[iface_id]}") - - if dry_run: - return (0 ,planned ,notes) - - updated = update_rt_ids(conn ,planned) - return (updated ,planned ,notes) - - -# --- thin CLI --- - -def main(argv=None) -> int: - ap = argparse.ArgumentParser() - ap.add_argument("--low" ,type=int ,default=20000) - 
ap.add_argument("--high" ,type=int ,default=29999) - ap.add_argument("--dry-run" ,action="store_true") - args = ap.parse_args(argv) - if args.low < 0 or args.high < args.low: - print(f"error: invalid range [{args.low},{args.high}]") - return 2 - with ic.open_db() as conn: - updated ,_planned ,notes = assign_missing_rt_table_ids(conn ,low=args.low ,high=args.high ,dry_run=args.dry_run) - if notes: - print("\n".join(notes)) - if not args.dry_run: - print(f"updated rows: {updated}") - return 0 - - -if __name__ == "__main__": - import sys - sys.exit(main()) diff --git a/developer/source/tunnel-client/db_init_route_defaults.py b/developer/source/tunnel-client/db_init_route_defaults.py deleted file mode 100644 index 857f27b..0000000 --- a/developer/source/tunnel-client/db_init_route_defaults.py +++ /dev/null @@ -1,138 +0,0 @@ -#!/usr/bin/env python3 -""" -db_init_route_defaults.py - -Business API: - seed_default_routes(conn ,iface_names ,overwrite=False ,metric=None) - -> (inserted_count ,notes[list]) - -What it does: -- For each iface in iface_names, ensure a default route "0.0.0.0/0" - is present in the Route table (on_up=1, no via/metric/table override). -- If overwrite=True, it first deletes existing Route rows for those ifaces, - then inserts the defaults. -- Writes **DB only**. It does not touch the kernel or /etc/iproute2/rt_tables. - -Why: -- Your apply script reads Route rows and emits `ip -4 route replace … table `. - Seeding a per-iface default route makes policy-routed tables usable out of the box. 
-""" - -from __future__ import annotations -import argparse -import sqlite3 -from typing import Dict ,Iterable ,List ,Optional ,Sequence ,Tuple - -# import helper to open DB when run as CLI; the business API accepts a conn -try: - import incommon as ic # type: ignore -except Exception: - ic = None # ok when used as a lib - - -def _iface_map(conn: sqlite3.Connection ,iface_names: Sequence[str]) -> Dict[str ,int]: - """Return {iface_name -> iface_id} for provided names (must exist).""" - if not iface_names: - return {} - ph = ",".join("?" for _ in iface_names) - sql = f"""SELECT id ,iface FROM Iface WHERE iface IN ({ph}) ORDER BY id;""" - rows = conn.execute(sql ,tuple(iface_names)).fetchall() - found = {str(name): int(iid) for (iid ,name) in rows} - missing = [n for n in iface_names if n not in found] - if missing: - raise RuntimeError(f"iface(s) not found: {', '.join(missing)}") - return found - - -def _existing_defaults(conn: sqlite3.Connection ,iface_ids: Iterable[int]) -> Dict[int ,bool]: - """Return {iface_id -> True/False} whether a default route row already exists (on_up=1).""" - ids = list(iface_ids) - if not ids: - return {} - ph = ",".join("?" for _ in ids) - sql = f""" - SELECT iface_id ,COUNT(1) - FROM Route - WHERE iface_id IN ({ph}) - AND cidr='0.0.0.0/0' - AND on_up=1 - GROUP BY iface_id; - """ - out: Dict[int ,bool] = {i: False for i in ids} - for iid ,cnt in conn.execute(sql ,tuple(ids)).fetchall(): - out[int(iid)] = int(cnt) > 0 - return out - - -def seed_default_routes( - conn: sqlite3.Connection - ,iface_names: Sequence[str] - ,overwrite: bool = False - ,metric: Optional[int] = None -) -> Tuple[int ,List[str]]: - """ - Upsert per-iface default routes into Route. 
- - Inserts rows: - (iface_id ,cidr='0.0.0.0/0' ,via=NULL ,table_name=NULL ,metric= ,on_up=1 ,on_down=0) - """ - if not iface_names: - raise RuntimeError("no interfaces provided") - - id_map = _iface_map(conn ,iface_names) - iface_ids = list(id_map.values()) - notes: List[str] = [] - inserted = 0 - - with conn: - if overwrite: - ph = ",".join("?" for _ in iface_ids) - conn.execute(f"DELETE FROM Route WHERE iface_id IN ({ph});" ,tuple(iface_ids)) - notes.append(f"cleared existing Route rows for: {', '.join(iface_names)}") - - exists = _existing_defaults(conn ,iface_ids) - - for name in iface_names: - iid = id_map[name] - if exists.get(iid): - notes.append(f"keep: default route already present for {name}") - continue - conn.execute( - """ - INSERT INTO Route(iface_id ,cidr ,via ,table_name ,metric ,on_up ,on_down - ,created_at ,updated_at) - VALUES( ? ,'0.0.0.0/0' ,NULL ,NULL ,? ,1 ,0 - ,strftime('%Y-%m-%dT%H:%M:%SZ','now') ,strftime('%Y-%m-%dT%H:%M:%SZ','now')) - """ - ,(iid ,metric) - ) - inserted += 1 - notes.append(f"add: default route 0.0.0.0/0 for {name}") - - return (inserted ,notes) - - -# ---- thin CLI for ad-hoc use ---- - -def main(argv: Optional[Sequence[str]] = None) -> int: - ap = argparse.ArgumentParser(description="Seed per-iface default Route rows.") - ap.add_argument("ifaces" ,nargs="+") - ap.add_argument("--overwrite" ,action="store_true") - ap.add_argument("--metric" ,type=int ,default=None) - args = ap.parse_args(argv) - - if ic is None: - print("error: cannot locate incommon.open_db() for CLI use") - return 2 - - with ic.open_db() as conn: - n ,notes = seed_default_routes(conn ,args.ifaces ,overwrite=args.overwrite ,metric=args.metric) - if notes: - print("\n".join(notes)) - print(f"inserted: {n}") - return 0 - - -if __name__ == "__main__": - import sys - sys.exit(main()) diff --git a/developer/source/tunnel-client/db_init_server_US.py b/developer/source/tunnel-client/db_init_server_US.py deleted file mode 100755 index d8cfcd0..0000000 --- 
a/developer/source/tunnel-client/db_init_server_US.py +++ /dev/null @@ -1,17 +0,0 @@ -# db_init_server_US.py -from db_init_server_incommon import upsert_server - -def init_server_US(conn): - # Endpoint from the historical config; adjust if needed - return upsert_server( - conn, - client_iface="US", - server_name="US", - server_public_key="h8ZYEEVMForvv9p5Wx+9+eZ87t692hTN7sks5Noedw8=", - endpoint_host="35.194.71.194", - endpoint_port=443, - allowed_ips="0.0.0.0/0", - keepalive_s=25, - route_allowed_ips=0, - priority=100, - ) diff --git a/developer/source/tunnel-client/db_init_server_incommon.py b/developer/source/tunnel-client/db_init_server_incommon.py deleted file mode 100644 index 18edb1f..0000000 --- a/developer/source/tunnel-client/db_init_server_incommon.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python3 -# Helpers to upsert a row in server bound to a client iface. - -from __future__ import annotations -import sqlite3 -from typing import Optional, Any, Dict -import incommon as ic # provides open_db, get_client_id - -def upsert_server(conn: sqlite3.Connection, - *, - client_iface: str, - server_name: str, - server_public_key: str, - endpoint_host: str, - endpoint_port: int, - allowed_ips: str, - preshared_key: Optional[str] = None, - keepalive_s: Optional[int] = None, - route_allowed_ips: int = 0, - priority: int = 100) -> str: - cid = ic.get_client_id(conn, client_iface) - - row = conn.execute( - "SELECT id, public_key, preshared_key, endpoint_host, endpoint_port, allowed_ips, " - " keepalive_s, route_allowed_ips, priority " - "FROM server WHERE iface_id=? AND name=? 
LIMIT 1;", - (cid, server_name), - ).fetchone() - - desired = { - "public_key": server_public_key, - "preshared_key": preshared_key, - "endpoint_host": endpoint_host, - "endpoint_port": endpoint_port, - "allowed_ips": allowed_ips, - "keepalive_s": keepalive_s, - "route_allowed_ips": route_allowed_ips, - "priority": priority, - } - - if row is None: - q = ( - "INSERT INTO server (iface_id,name,public_key,preshared_key," - " endpoint_host,endpoint_port,allowed_ips,keepalive_s,route_allowed_ips,priority," - " created_at,updated_at) " - "VALUES (?,?,?,?,?,?,?,?,?,?, strftime('%Y-%m-%dT%H:%M:%SZ','now'), strftime('%Y-%m-%dT%H:%M:%SZ','now'));" - ) - params = (cid, server_name, desired["public_key"], desired["preshared_key"], - desired["endpoint_host"], desired["endpoint_port"], desired["allowed_ips"], - desired["keepalive_s"], desired["route_allowed_ips"], desired["priority"]) - cur = conn.execute(q, params); conn.commit() - return f"seeded: server(name={server_name}) client={client_iface} id={cur.lastrowid}" - else: - sid, pub, psk, host, port, allow, ka, route_ai, prio = row - current = { - "public_key": pub, "preshared_key": psk, "endpoint_host": host, "endpoint_port": port, - "allowed_ips": allow, "keepalive_s": ka, "route_allowed_ips": route_ai, "priority": prio - } - changes: Dict[str, Any] = {k: v for k, v in desired.items() if v != current.get(k)} - if not changes: - return f"ok: server(name={server_name}) client={client_iface} unchanged id={sid}" - sets = ", ".join(f"{k}=?" for k in changes) - params = list(changes.values()) + [cid, server_name] - conn.execute( - f"UPDATE server SET {sets}, updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') " - "WHERE iface_id=? 
AND name=?;", params - ) - conn.commit() - return f"updated: server(name={server_name}) client={client_iface} id={sid} " + " ".join(f"{k}={changes[k]}" for k in changes) diff --git a/developer/source/tunnel-client/db_init_server_x6.py b/developer/source/tunnel-client/db_init_server_x6.py deleted file mode 100755 index 3377d91..0000000 --- a/developer/source/tunnel-client/db_init_server_x6.py +++ /dev/null @@ -1,16 +0,0 @@ -# db_init_server_x6.py -from db_init_server_incommon import upsert_server - -def init_server_x6(conn): - return upsert_server( - conn, - client_iface="x6", - server_name="x6", - server_public_key="pcbDlC1ZVoBYaN83/zAsvIvhgw0iQOL1YZKX5hcAqno=", - endpoint_host="66.248.243.113", - endpoint_port=51820, - allowed_ips="0.0.0.0/0", - keepalive_s=25, - route_allowed_ips=0, - priority=100, - ) diff --git a/developer/source/tunnel-client/db_schema.sql b/developer/source/tunnel-client/db_schema.sql deleted file mode 100644 index cf9cdb0..0000000 --- a/developer/source/tunnel-client/db_schema.sql +++ /dev/null @@ -1,118 +0,0 @@ -PRAGMA foreign_keys = ON; -PRAGMA journal_mode = WAL; -PRAGMA user_version = 300; -- v3.00: singular, capitalized tables; private_key removed - --- meta first (so later INSERTs succeed) -CREATE TABLE IF NOT EXISTS Meta ( - key TEXT PRIMARY KEY - ,value TEXT NOT NULL -); -INSERT OR REPLACE INTO Meta(key,value) VALUES ('schema','wg-client-v3.00-Ifaces'); -INSERT OR IGNORE INTO Meta(key,value) VALUES ('subu_cidr','10.0.0.0/24'); - --- Iface, interface, device, netdevice, link — table of them -CREATE TABLE IF NOT EXISTS Iface ( - id INTEGER PRIMARY KEY - ,created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) - ,updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) - ,iface TEXT NOT NULL UNIQUE -- kernel interface name as shown by ip link (e.g., wg0, x6) - ,rt_table_id INTEGER -- e.g. 
1002, unused - ,rt_table_name TEXT -- if NULL, default to iface (see view) - -- legacy caches (kept for compatibility; may be NULL) - ,bound_user TEXT - ,bound_uid INTEGER - ,local_address_cidr TEXT -- e.g. '10.8.0.2/32' - -- secrets: private key is NO LONGER stored in DB (lives under key/) - ,public_key TEXT CHECK (public_key IS NULL OR length(public_key) BETWEEN 43 AND 45) - ,mtu INTEGER - ,fwmark INTEGER - ,dns_mode TEXT NOT NULL DEFAULT 'none' CHECK (dns_mode IN ('none','static')) - ,dns_servers TEXT - ,autostart INTEGER NOT NULL DEFAULT 0 -); - --- Server (one or more remote peers for an Iface) -CREATE TABLE IF NOT EXISTS Server ( - id INTEGER PRIMARY KEY - ,created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) - ,updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) - ,iface_id INTEGER NOT NULL REFERENCES Iface(id) ON DELETE CASCADE - ,name TEXT NOT NULL -- e.g. 'x6', 'US' - ,public_key TEXT NOT NULL CHECK (length(public_key) BETWEEN 43 AND 45) - ,preshared_key TEXT CHECK (preshared_key IS NULL OR length(preshared_key) BETWEEN 43 AND 45) - ,endpoint_host TEXT NOT NULL - ,endpoint_port INTEGER NOT NULL CHECK (endpoint_port BETWEEN 1 AND 65535) - ,allowed_ips TEXT NOT NULL -- typically '0.0.0.0/0' - ,keepalive_s INTEGER - ,route_allowed_ips INTEGER NOT NULL DEFAULT 1 - ,priority INTEGER NOT NULL DEFAULT 100 - ,UNIQUE(iface_id, name) -); - --- Route (optional extra routes applied by post-up script) -CREATE TABLE IF NOT EXISTS Route ( - id INTEGER PRIMARY KEY - ,created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) - ,updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) - ,iface_id INTEGER NOT NULL REFERENCES Iface(id) ON DELETE CASCADE - ,cidr TEXT NOT NULL - ,via TEXT - ,table_name TEXT - ,metric INTEGER - ,on_up INTEGER NOT NULL DEFAULT 1 - ,on_down INTEGER NOT NULL DEFAULT 0 -); - --- User (many linux users → one Iface) --- each user is bound to an iface via an 'ip rule add uidrange ..' 
command -CREATE TABLE IF NOT EXISTS User ( - id INTEGER PRIMARY KEY - ,created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) - ,updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) - ,iface_id INTEGER NOT NULL REFERENCES Iface(id) ON DELETE CASCADE - ,username TEXT NOT NULL - ,uid INTEGER -- cached UID if resolved - ,UNIQUE(iface_id, username) -); - --- Effective view (provides computed defaults like rt_table_name_eff) -CREATE VIEW IF NOT EXISTS v_iface_effective AS -SELECT - i.id - ,i.iface - ,COALESCE(i.rt_table_name, i.iface) AS rt_table_name_eff - ,i.local_address_cidr -FROM Iface i; - --- mtime triggers -CREATE TRIGGER IF NOT EXISTS trg_iface_mtime -AFTER UPDATE ON Iface FOR EACH ROW -BEGIN - UPDATE Iface - SET updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') - WHERE id=NEW.id; -END; - -CREATE TRIGGER IF NOT EXISTS trg_server_mtime -AFTER UPDATE ON Server FOR EACH ROW -BEGIN - UPDATE Server - SET updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') - WHERE id=NEW.id; -END; - -CREATE TRIGGER IF NOT EXISTS trg_route_mtime -AFTER UPDATE ON Route FOR EACH ROW -BEGIN - UPDATE Route - SET updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') - WHERE id=NEW.id; -END; - -CREATE TRIGGER IF NOT EXISTS trg_user_binding_mtime -AFTER UPDATE ON User FOR EACH ROW -BEGIN - UPDATE User - SET updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') - WHERE id=NEW.id; -END; diff --git a/developer/source/tunnel-client/db_schema_load.sh b/developer/source/tunnel-client/db_schema_load.sh deleted file mode 100755 index d4718bf..0000000 --- a/developer/source/tunnel-client/db_schema_load.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env bash -# db_init.sh — create/upgrade db/store by loading schema.sql (idempotent) - -set -euo pipefail -DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" -DB="$DIR/db/store" -SCHEMA="$DIR/db_schema.sql" - -command -v sqlite3 >/dev/null || { echo "❌ sqlite3 not found"; exit 1; } -[[ -f "$SCHEMA" ]] || { echo "❌ schema file 
missing: $SCHEMA"; exit 1; } - -if [[ -f "$DB" ]]; then - ts="$(date -u +%Y%m%dT%H%M%SZ)" - cp -f -- "$DB" "$DB.bak-$ts" - echo "↩︎ Backed up existing DB to $DB.bak-$ts" -fi - -sqlite3 -cmd '.bail on' "$DB" < "$SCHEMA" - -ver="$(sqlite3 "$DB" 'PRAGMA user_version;')" -echo "✔ DB ready: $DB (user_version=$ver)" -echo " Tables:" -sqlite3 -noheader -list "$DB" "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;" diff --git a/developer/source/tunnel-client/db_wipe.py b/developer/source/tunnel-client/db_wipe.py deleted file mode 100755 index d0eb4ec..0000000 --- a/developer/source/tunnel-client/db_wipe.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env python3 -""" -db_wipe.py - -Remove regular (non-directory) files in ./db, keeping the directory. - -Safety -- Refuses to run if the target directory does not exist or its basename is not exactly "db". -- Prints a plan, then asks "Are you sure? [y/N]" unless --force is used. -- --dry-run prints what would be removed without deleting. -- Hidden files (names starting with '.') are preserved by default; use --include-hidden to delete them too. - -Usage - ./db_wipe.py # plan + prompt, non-hidden files only, ./db next to this script - ./db_wipe.py --force # no prompt - ./db_wipe.py --dry-run # show what would be deleted - ./db_wipe.py --include-hidden - ./db_wipe.py --db /path/to/db -""" - -from __future__ import annotations -from pathlib import Path -from typing import Iterable, List, Tuple -import argparse -import sys -import os - -# ---------- business ---------- - -def plan_db_wipe(db_dir: Path, include_hidden: bool = False) -> List[Path]: - """ - Return a sorted list of file Paths (depth=1) to delete from db_dir. 
- """ - if not db_dir.exists(): - raise FileNotFoundError(f"not found: {db_dir}") - if not db_dir.is_dir(): - raise NotADirectoryError(f"not a directory: {db_dir}") - if db_dir.name != "db": - raise RuntimeError(f"expected directory named 'db', got: {db_dir.name}") - - def _is_hidden(p: Path) -> bool: - return p.name.startswith(".") - - files = [p for p in db_dir.iterdir() if p.is_file()] - if not include_hidden: - files = [p for p in files if not _is_hidden(p)] - - # Sort by name for stable output - return sorted(files, key=lambda p: p.name) - - -def wipe_db( - db_dir: Path, - include_hidden: bool = False, - dry_run: bool = False, - assume_yes: bool = False, - _prompt_fn=input, -) -> Tuple[int, List[str]]: - """ - Delete planned files from db_dir. Returns (deleted_count, logs). - Does not prompt if assume_yes=True or dry_run=True. - """ - targets = plan_db_wipe(db_dir, include_hidden=include_hidden) - - logs: List[str] = [] - script_dir = Path(__file__).resolve().parent - - if not targets: - logs.append(f"db_wipe: no matching files in: {db_dir.relative_to(script_dir)}") - return (0, logs) - - logs.append("db_wipe: plan") - for p in targets: - # Show path relative to script directory like the original - rel = p.resolve().relative_to(script_dir) - logs.append(f" delete: {rel}") - - if dry_run: - logs.append("db_wipe: dry-run; no changes made") - return (0, logs) - - if not assume_yes: - print("\n".join(logs)) - try: - ans = _prompt_fn("Are you sure? 
[y/N] ").strip().lower() - except EOFError: - ans = "" - if ans not in ("y", "yes"): - logs.append("db_wipe: aborted") - return (0, logs) - - deleted = 0 - for p in targets: - try: - p.unlink(missing_ok=True) # py3.8+: if not available, catch FileNotFoundError - deleted += 1 - except FileNotFoundError: - # Equivalent to rm -f - pass - - rel_db = db_dir.resolve().relative_to(script_dir) - logs.append(f"db_wipe: deleted {deleted} file(s) from {rel_db}") - return (deleted, logs) - - -# ---------- CLI wrapper ---------- - -def _default_db_dir() -> Path: - return Path(__file__).resolve().parent / "db" - -def main(argv: list[str] | None = None) -> int: - ap = argparse.ArgumentParser(description="Remove regular files in ./db, keeping the directory.") - ap.add_argument("--db", default=str(_default_db_dir()), help="path to the db directory (default: ./db next to this script)") - ap.add_argument("--force", action="store_true", help="do not prompt for confirmation") - ap.add_argument("--dry-run", action="store_true", help="print what would be removed without deleting") - ap.add_argument("--include-hidden", action="store_true", help="include dotfiles (e.g., .gitignore)") - args = ap.parse_args(argv) - - db_dir = Path(args.db) - - try: - deleted, logs = wipe_db( - db_dir=db_dir, - include_hidden=args.include_hidden, - dry_run=args.dry_run, - assume_yes=args.force or args.dry_run, - ) - if logs: - print("\n".join(logs)) - return 0 - except (FileNotFoundError, NotADirectoryError, RuntimeError) as e: - print(f"❌ {e}", file=sys.stderr) - return 1 - except Exception as e: - print(f"❌ unexpected error: {e}", file=sys.stderr) - return 2 - -if __name__ == "__main__": - sys.exit(main()) diff --git a/developer/source/tunnel-client/deploy_StanleyPark.py b/developer/source/tunnel-client/deploy_StanleyPark.py deleted file mode 100755 index 933311c..0000000 --- a/developer/source/tunnel-client/deploy_StanleyPark.py +++ /dev/null @@ -1,216 +0,0 @@ -#!/usr/bin/env python3 -""" 
-deploy_StanleyPark.py — stop → install staged files → start (for selected ifaces) - -- Requires root. Exits after reporting *all* detected CLI/import errors. -- Calls business functions directly: - * stop_clean_iface.stop_clean_ifaces(ifaces) - * install_staged_tree.install_staged_tree(stage_root, dest_root, create_dirs, skip_identical) - * start_iface.start_ifaces(ifaces) -- If no ifaces provided on CLI, it discovers them from the stage tree. - -Usage: - sudo ./deploy_StanleyPark.py # discover ifaces from stage, stop→install→start - sudo ./deploy_StanleyPark.py x6 US # explicit iface list - sudo ./deploy_StanleyPark.py --no-stop # skip stop step - sudo ./deploy_StanleyPark.py --no-start # skip start step - sudo ./deploy_StanleyPark.py --stage ./stage --root / --create-dirs -""" - -from __future__ import annotations -from pathlib import Path -from typing import List, Sequence, Tuple -import argparse -import os -import sys -import traceback - -ROOT = Path(__file__).resolve().parent -sys.path.insert(0, str(ROOT)) # ensure sibling modules importable - -# --- lightweight staged-iface discovery (duplicated here to avoid importing internals) --- -def _discover_ifaces_from_stage(stage_root: Path) -> List[str]: - names = set() - # from /etc/wireguard/.conf - wg_dir = stage_root / "etc" / "wireguard" - if wg_dir.is_dir(): - for p in wg_dir.glob("*.conf"): - names.add(p.stem) - # from /etc/systemd/system/wg-quick@.service.d/ - sysd = stage_root / "etc" / "systemd" / "system" - if sysd.is_dir(): - for d in sysd.glob("wg-quick@*.service.d"): - nm = d.name # wg-quick@IFACE.service.d - at = nm.find("@") - dot = nm.find(".service.d") - if at != -1 and dot != -1 and dot > at: - names.add(nm[at+1:dot]) - return sorted(names) - -def _is_root() -> bool: - try: - return os.geteuid() == 0 - except AttributeError: - # Non-POSIX: best effort - return False - -def _validate_iface_name(n: str) -> bool: - # conservative: letters, digits, dash, underscore (WireGuard allows more, but keep it 
safe) - import re - return bool(re.fullmatch(r"[A-Za-z0-9_-]{1,32}", n)) - -def _collect_errors(args) -> Tuple[List[str], List[str]]: - """ - Return (errors, ifaces). Does *not* raise. - """ - errors: List[str] = [] - - # Root required - if not _is_root(): - errors.append("must be run as root (sudo)") - - # Stage root - stage_root = Path(args.stage) - if not stage_root.exists(): - errors.append(f"stage path does not exist: {stage_root}") - - # Import modules - inst_mod = None - stop_mod = None - start_mod = None - try: - import install_staged_tree as inst_mod # type: ignore - except Exception as e: - errors.append(f"failed to import install_staged_tree: {e}") - try: - import stop_clean_iface as stop_mod # type: ignore - except Exception as e: - errors.append(f"failed to import stop_clean_iface: {e}") - try: - import start_iface as start_mod # type: ignore - except Exception as e: - errors.append(f"failed to import start_iface: {e}") - - # Business functions existence (only if imports worked) - if inst_mod is not None and not hasattr(inst_mod, "install_staged_tree"): - errors.append("install_staged_tree module missing function: install_staged_tree") - if stop_mod is not None and not hasattr(stop_mod, "stop_clean_ifaces"): - errors.append("stop_clean_iface module missing function: stop_clean_ifaces") - if start_mod is not None and not hasattr(start_mod, "start_ifaces"): - errors.append("start_iface module missing function: start_ifaces") - - # Ifaces - ifaces: List[str] - if args.ifaces: - ifaces = list(dict.fromkeys(args.ifaces)) # dedup preserve order - else: - ifaces = _discover_ifaces_from_stage(stage_root) - if not ifaces: - errors.append("no interfaces provided and none discovered from stage") - else: - bad = [n for n in ifaces if not _validate_iface_name(n)] - if bad: - errors.append(f"invalid iface name(s): {', '.join(bad)}") - - return (errors, ifaces) - -def deploy_StanleyPark( - ifaces: Sequence[str], - stage_root: Path, - dest_root: Path, - create_dirs: 
bool, - skip_identical: bool, - do_stop: bool, - do_start: bool, -) -> int: - """ - Orchestration: stop (optional) → install → start (optional). - """ - # Late imports so unit tests can monkeypatch easily - import install_staged_tree as inst - import stop_clean_iface as stopm - import start_iface as startm - - print(f"Deploy plan:\n ifaces: {', '.join(ifaces)}\n stage: {stage_root}\n root: {dest_root}\n") - - # Stop - if do_stop: - print(f"Stopping: {' '.join(ifaces)}") - try: - stop_logs = stopm.stop_clean_ifaces(ifaces) - if isinstance(stop_logs, (list, tuple)): - for line in stop_logs: - print(line) - except Exception: - print("warn: stop_clean_ifaces raised an exception (continuing):") - traceback.print_exc() - - # Install - print("\nInstalling staged artifacts…") - try: - logs, detected = inst.install_staged_tree( - stage_root=stage_root, - dest_root=dest_root, - create_dirs=create_dirs, - skip_identical=skip_identical, - ) - for line in logs: - print(line) - except Exception: - print("❌ install failed with exception:", file=sys.stderr) - traceback.print_exc() - return 2 - - # Start - if do_start: - # Prefer explicit ifaces; fall back to what installer detected - start_list = list(ifaces) if ifaces else list(detected) - if not start_list: - print("\nNo interfaces to start (none detected).") - else: - print(f"\nStarting: {' '.join(start_list)}") - try: - start_logs = startm.start_ifaces(start_list) - if isinstance(start_logs, (list, tuple)): - for line in start_logs: - print(line) - except Exception: - print("warn: start_ifaces raised an exception:", file=sys.stderr) - traceback.print_exc() - return 2 - - print("\n✓ Deploy complete.") - return 0 - -def main(argv: List[str] | None = None) -> int: - ap = argparse.ArgumentParser(description="Deploy staged WG artifacts for StanleyPark (stop→install→start).") - ap.add_argument("ifaces", nargs="*", help="interfaces to manage (default: discover from stage)") - ap.add_argument("--stage", default=str(ROOT / "stage"), 
help="stage root (default: ./stage)") - ap.add_argument("--root", default="/", help="destination root (default: /)") - ap.add_argument("--create-dirs", action="store_true", help="create missing parent directories") - ap.add_argument("--no-skip-identical", action="store_true", help="always replace even if content identical") - ap.add_argument("--no-stop", action="store_true", help="do not stop interfaces before install") - ap.add_argument("--no-start", action="store_true", help="do not start interfaces after install") - args = ap.parse_args(argv) - - # Collect all errors up front - errors, ifaces = _collect_errors(args) - if errors: - print("❌ deploy preflight found issue(s):", file=sys.stderr) - for e in errors: - print(f" - {e}", file=sys.stderr) - return 2 - - # Proceed - return deploy_StanleyPark( - ifaces=ifaces, - stage_root=Path(args.stage), - dest_root=Path(args.root), - create_dirs=args.create_dirs, - skip_identical=(not args.no_skip_identical), - do_stop=(not args.no_stop), - do_start=(not args.no_start), - ) - -if __name__ == "__main__": - sys.exit(main()) diff --git a/developer/source/tunnel-client/deprecated/.gitignore b/developer/source/tunnel-client/deprecated/.gitignore deleted file mode 100644 index 53642ce..0000000 --- a/developer/source/tunnel-client/deprecated/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ - -* -!.gitignore - diff --git a/developer/source/tunnel-client/doc_IP_terminaology.org b/developer/source/tunnel-client/doc_IP_terminaology.org deleted file mode 100644 index 8f6587b..0000000 --- a/developer/source/tunnel-client/doc_IP_terminaology.org +++ /dev/null @@ -1,98 +0,0 @@ -#+TITLE: Interface vs Link vs Netdevice: a cynical field guide -#+AUTHOR: Thomas & Nerith (session) -#+LANGUAGE: en -#+OPTIONS: toc:2 num:t - -* TL;DR -In Linux networking (and in this doc), /interface/, /link/, and /netdevice/ can all refer to the same kernel object, e.g., wg0, x6, eth0. 
This conflation of terms came about because different tribes named the same thing differently. - -* What these words actually refer to -- interface: common admin usage for referring to said kernel network object. -- link: iproute2's vocabulary for said kernel network object (as in the command: `ip link show ` which gives information about said kernel network object). -- netdevice: the kernel's term (struct net_device under the hood) - -* Where the words come from -- Kernel engineers: /netdevice/ is the internal type that packets touch. -- iproute2 authors: named their subcommands by subsystem; the L2-ish one is /link/. Hence ip link, ip addr, ip route, ip rule, ip neigh. -- Humans: kept saying /interface/ because that was the word from ifconfig days and textbooks. - -* Cynic's guide to commands (map the terrain) -- ip link show x6 → show properties of interface x6 (state, mtu, type, flags); not L3 addresses or routes (here /link/ == /interface/) -- ip addr add A dev x6 → attach IP address A as a property of interface x6; this alone does not force source choice or egress path (here /dev/ = /interface/) -- ip route add dev x6 → write a route entry: map destination → interface x6 (here /dev/ = /interface/) -- ip rule add ... → write a policy rule that selects which routing table to consult -- ip neigh ... → view/manage the neighbor cache (ARP/ND) per interface; maps L3 neighbor → L2 address; not routing - - -* Device - -In computing, a /device/ is a piece of hardware. This includes printers, disk drives, memory cards, NIC cards, etc. An emulated device is software that is written to do the same thing as an actual device. This is sometimes done when compatibility with an old device is needed, but the old device is not available. A virtual device is software that is written to do the same thing as an imagined device. This is sometimes done to make available features that no physical device provides. 
A virtual device can also be state that is kept to support multiplexing a real device among many users, while giving each user the appearance of having sole ownership of said device. It is also common to call a device emulator a virtual device. - -In unix operating systems special files are used for interfacing to devices. Such an interface is often called a /device file/, which inevitably gets shortened to /device/. - -In networking, the kernel keeps state data for a device, and software drivers for shipping data to and from a device used for networking. Such software objects are often called /network devices/. The interface to the kernel used for talking to devices inevitably gets called a /device/. - -The terms, /physical device/, /device file/, and /netdevice/ are used to distinguish among the various possible meanings of /device/. We observe that generally terminology suffers due to a desire to flatten and thus simplify the discussion of the communication abstraction stack. - -* Interface - -An /interface/ is a shared surface between two systems. A user interface is the shared surface between a user and a system. E.g. the dashboard of a car is a user interface to the car. - -In software programming, an interface is a set of data and routines used for communication between software systems. For example, an API is a application programming interface. - -The OS provides named interfaces for communicating with network devices. Within the context of network programming, The literature will refer to such an interface as the /device/, /link/, or /interface/, the latter being the only term fitting the wider scope conventional definition. - -* Link - -A /link/ is a pathway that connects two systems. With an interface, there is no link, as the systems touch. A link has two interfaces, one on each end. Hence it was inevitable that a link interface would be called a /link/. And if the link connects to a device, then that link interface itself gets called a /device/. 
- -In iproute2 /link/ means the local endpoint object. Do not assume a remote counterpart exists just because you saw the word /link/. - -* Machine Peers - -- Client - -In these documents, the client machine is the local machine users are working on. Inevitably this gets shortened to /client/ in polite conversation. The example client used in this distribution is StanleyPark. That is a host name of a computer on our network. - -- Server - -In these document, the server machine is the remote machine that the write guard tunnels to. We have nicknames for machines. The example used here has the server nicknames of x6, and US. -These nicknames are also used for the names of the client machine side interface that connects to the tunnel that leads to said server machine. The nickname is also used for the name of the routing table on the client that routes traffic go said wireguard tunnel. - -Hence, a nickname, like x6 or US, refers to a machine, an interface, and an IP route table. - -* Software Peers - -Programs that run as daemons while listening for connections, and once connected to,k they provide services, are server programs. The program that connects to said software server is called a client program. You guessed it, the terms 'server program' and 'client program' often get shortened to /server/ and /client/. - - -* WireGuard mini-map -We will use this terminology: - -- We will consider that WireGuard is conceptually a virtual device. -- There can be many interfaces to said WireGuard device, taking names like wg0 or x6. Each has a keypair, a listen port, and a set of peers. -- Config tools: "wg" (CLI, not a daemon), "wg-quick" (oneshot helper per interface). 
-- Reality check: - - ip link show type wireguard → lists all WG interfaces - - ip -d link show x6 → detailed information about the x6 interface - - wg show x6 → peer/crypto state for the x6 interface - -* Sanity tests you can run -#+begin_src sh -# list all WireGuard interfaces -ip link show type wireguard - -# detailed view of one interface -ip -d link show x6 - -# see handshake and byte counters -wg show x6 - -# show L3 addresses bound to an interface -ip addr show dev x6 - -# show routes in a named table (if you use policy routing) -ip route show table x6 -#+end_src - - diff --git a/developer/source/tunnel-client/doc_StanleyPark.org b/developer/source/tunnel-client/doc_StanleyPark.org deleted file mode 100644 index 292ec21..0000000 --- a/developer/source/tunnel-client/doc_StanleyPark.org +++ /dev/null @@ -1,51 +0,0 @@ - -1. create/update the client configuration files. - - These are the configuration files for the machine called StanleyPark, which is on - our local network. (Yes, we capitalize popper nouns, and thus have some "bad names".) - - db_init_StanleyPark.py - stage_StanleyPark - deploy_StanleyPark - - They are in Python. - -2. Wipe the database and the stage. - - Wiping the db will erase keys and any other client configurations. This does not effect already installed configuration files. Also, the database can always be rebuilt by running the client configuration files again. - - ./db_wipe.py - ./stage_wipe.py - -3. Setup the database - - ./db_init_StanleyPark - -4. setup the keys - - ./key_generate StanleyPark.py - ./key_server_set.py - - to see the keys in the database - - ./ls_key.py - - if the database was wiped, it will be necessary to key_generate again. Currently - there is one client machine key pair. - -5. stage the configuration files to be installed - - ./stage_StanleyPark - - check them make sure they are what you want - -6. 
install the staged files - - ./deploy_StanlwayPark - - -The goal here is work towards each subu as a container, with its networking tunneled -to the specified interface. Perhaps the configuration scripts should be subu based instead of client machine based. Perhaps in the next version. - - - diff --git a/developer/source/tunnel-client/doc_config.org b/developer/source/tunnel-client/doc_config.org deleted file mode 100644 index 2de0ee4..0000000 --- a/developer/source/tunnel-client/doc_config.org +++ /dev/null @@ -1,9 +0,0 @@ --New interface: - -copy `db_init_iface_x6.py` to `db_init_iface_.py`, replacing with the name of the interface. Then edit `db_init_iface_.py` - --New Client - --New User - - diff --git a/developer/source/tunnel-client/doc_keys.org b/developer/source/tunnel-client/doc_keys.org deleted file mode 100644 index e56bd76..0000000 --- a/developer/source/tunnel-client/doc_keys.org +++ /dev/null @@ -1,14 +0,0 @@ - -From the point of view of setting up the client (we are in the client setup directory after all): - -1. login to the server and get the server public key. - - This public key is written into the db_init_iface_>.py configuration file. Note the examples `db_init_iface_US.py` and `db_init_iface_x6`. `x6` and `US` are nicknames for two servers. These nicknames are also used for the interface names. - - Note that the server private key remains on the server. The client has no knowledge of the server private key. It is not entered anywhere in the client configuration. - -2. run the program `key_client_generate1 - - This will print the client public key. It will also place a copy in the database. - - This will write the client private key into a local directory called `key/`. The admin need not do anything concerning this key. Scripts that need it will find it in the 'key/' directory. 
diff --git a/developer/source/tunnel-client/doc_stage_progs.org b/developer/source/tunnel-client/doc_stage_progs.org deleted file mode 100644 index a80f789..0000000 --- a/developer/source/tunnel-client/doc_stage_progs.org +++ /dev/null @@ -1,42 +0,0 @@ - -stage programs write to the stage directory. Later install copies from the stage -directory to a provided root, which if it is the local machine, will be '/'. - - -* stage_IP_register_route_table.py - - stages a replacement etc/iproute2/rt_tables file. - -* stage_wg_conf.py - - stages etc/wireguard/ conf files for the configured interfaces - -* stage_IP_routes_script.py - - 1. stages a shell script that when called writes the IP rule table. Said script binds UIDs to route tables. - - 2. stages a priority 10 systemd guard systemd dropin that will call said shell script when - WireGuard is started or restarted. - -* stage_IP_rules_script.py - - 1. stages a shell script that when called writes the required IP route tables - - 2. stages a priority 20 systemd guard systemd dropin that will call said shell script when - WireGuard is started or restarted. - -* stage_client_StanleyPark.py - - A local use client machine configuration file. Calls the other stage programs - while providing the correct parameters for configuring wireguard on the - machine StanleyPark. Typically these will be a database connection and a list of - users. - - The admin will write such a file for each machine he/she/ai is configuring. - -* stage_incommon.py - - Utility functions for stage programs. 
- - - diff --git a/developer/source/tunnel-client/iface_down.py b/developer/source/tunnel-client/iface_down.py deleted file mode 100755 index a1e6474..0000000 --- a/developer/source/tunnel-client/iface_down.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python3 -# iface_down.py — stop wg-quick@ and remove uid→rt rules - -from __future__ import annotations -import os, sys, sqlite3, subprocess -import incommon as ic # provides open_db() - -def sh(args: list[str], check: bool=False) -> subprocess.CompletedProcess: - return subprocess.run(args, text=True, capture_output=True, check=check) - -def get_rt_table_name(conn: sqlite3.Connection, iface: str) -> str: - row = conn.execute( - "SELECT rt_table_name_eff FROM v_client_effective WHERE iface=? LIMIT 1;", - (iface,) - ).fetchone() - if not row: - raise RuntimeError(f"Interface not found in DB: {iface}") - return str(row[0]) - -def get_bound_uids(conn: sqlite3.Connection, iface: str) -> list[int]: - rows = conn.execute( - """SELECT ub.uid - FROM User ub - JOIN Iface c ON c.id = ub.iface_id - WHERE c.iface=? 
AND ub.uid IS NOT NULL - ORDER BY ub.uid;""", - (iface,) - ).fetchall() - return [int(r[0]) for r in rows] - -def iface_down(iface: str) -> str: - if os.geteuid() != 0: - raise PermissionError("This script must be run as root.") - - # Stop interface (ignore failure) - sh(["systemctl", "stop", f"wg-quick@{iface}"]) - - # DB lookups - with ic.open_db() as conn: - table = get_rt_table_name(conn, iface) - uids = get_bound_uids(conn, iface) - - # Snapshot rules once for existence checks - rules = sh(["ip", "-4", "rule", "list"]).stdout - - removed = 0 - for uid in uids: - needle = f"uidrange {uid}-{uid} " - if needle in rules and f" lookup {table}" in rules: - # Try to delete; ignore failure to keep idempotence - sh(["ip", "-4", "rule", "del", "uidrange", f"{uid}-{uid}", "table", table]) - sh(["logger", f"iface_down: removed uid {uid} rule for table {table}"]) - removed += 1 - - return f"✅ {iface} stopped; removed {removed} uid rules from table {table}." - -def main(argv: list[str]) -> int: - if len(argv) != 1: - print(f"Usage: {os.path.basename(sys.argv[0])} ", file=sys.stderr) - return 2 - iface = argv[0] - try: - msg = iface_down(iface) - except (PermissionError, FileNotFoundError, sqlite3.Error, RuntimeError) as e: - print(f"❌ {e}", file=sys.stderr); return 1 - print(msg); return 0 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/iface_status.py b/developer/source/tunnel-client/iface_status.py deleted file mode 100755 index c0a12e9..0000000 --- a/developer/source/tunnel-client/iface_status.py +++ /dev/null @@ -1,131 +0,0 @@ -#!/usr/bin/env python3 -# iface_status.py — show unit/wg/route/uid-rule status for - -from __future__ import annotations -import os, sys, shutil, sqlite3, subprocess, time -from pathlib import Path -import incommon as ic # provides open_db() - -# --- small shell helpers ----------------------------------------------------- - -def sh(args: list[str]) -> subprocess.CompletedProcess: - """Run 
command; never raise; text mode; capture stdout/stderr.""" - return subprocess.run(args, text=True, capture_output=True) - -def which(cmd: str) -> bool: - return shutil.which(cmd) is not None - -def print_block(title: str, body: str | None = None) -> None: - print(f"=== {title} ===") - if body is not None and body != "": - print(body.rstrip()) - print() - -# --- DB helpers --------------------------------------------------------------- - -def get_rt_table_name(conn: sqlite3.Connection, iface: str) -> str: - row = conn.execute( - "SELECT rt_table_name_eff FROM v_client_effective WHERE iface=? LIMIT 1;", - (iface,) - ).fetchone() - if not row: - raise RuntimeError(f"Interface not found in DB: {iface}") - return str(row[0]) - -def get_bound_users(conn: sqlite3.Connection, iface: str) -> list[tuple[str, int | None]]: - rows = conn.execute( - """SELECT ub.username, ub.uid - FROM User ub - JOIN Iface c ON c.id = ub.iface_id - WHERE c.iface=? - ORDER BY ub.username;""", - (iface,) - ).fetchall() - return [(str(u), (None if v is None else int(v))) for (u, v) in rows] - -# --- core -------------------------------------------------------------------- - -def iface_status(iface: str) -> int: - # DB open + resolve table name early for helpful errors - with ic.open_db() as conn: - table = get_rt_table_name(conn, iface) - - # systemd status - en = sh(["systemctl", "is-enabled", f"wg-quick@{iface}"]) - ac = sh(["systemctl", "is-active", f"wg-quick@{iface}"]) - sys_body = "\n".join([ - (en.stdout.strip() if en.stdout.strip() else "").strip(), - (ac.stdout.strip() if ac.stdout.strip() else "").strip(), - ]).strip() - print_block(f"systemd: wg-quick@{iface}", sys_body) - - # wg presence + handshake age - wg_title = f"wg: {iface}" - if which("wg"): - if Path(f"/sys/class/net/{iface}").exists(): - lines: list[str] = ["(present)"] - # Try sudo-less handshake read; if not permitted, show hint - hs_try = sh(["sudo", "-n", "wg", "show", iface, "latest-handshakes"]) - if hs_try.returncode 
== 0 and hs_try.stdout.strip(): - # expected format: " " - epoch_part = hs_try.stdout.strip().split()[-1] - try: - hs = int(epoch_part) - if hs > 0: - age = int(time.time()) - hs - lines.append(f"latest-handshake: {age}s ago") - else: - lines.append("latest-handshake: none") - except ValueError: - lines.append("latest-handshake: unknown") - else: - prog = Path(sys.argv[0]).name or "iface_status.py" - lines.append(f"⚠ need sudo to read peers/handshake (try: sudo {prog} {iface})") - print_block(wg_title, "\n".join(lines)) - else: - print_block(wg_title, "(interface down or not present)") - else: - print_block(wg_title, "wg tool not found.") - - # route for table - rt = sh(["ip", "-4", "route", "show", "table", table]) - print_block(f"route: table {table}", rt.stdout if rt.stdout else "") - - # uid rules targeting table - rules = sh(["ip", "-4", "rule", "show"]).stdout.splitlines() - hits = [ln for ln in rules if f"lookup {table}" in ln] - print_block(f"uid rules → table {table}", "\n".join(hits) if hits else "(none)") - - # DB: bound users - with ic.open_db() as conn: - bound = get_bound_users(conn, iface) - - if not bound: - print_block(f"DB: bound users for {iface}", "(none)") - else: - # simple column render - header = ("username", "uid") - rows = [(u, ("" if v is None else str(v))) for (u, v) in bound] - w1 = max(len(header[0]), *(len(r[0]) for r in rows)) - w2 = max(len(header[1]), *(len(r[1]) for r in rows)) - body_lines = [f"{header[0]:<{w1}} {header[1]:<{w2}}", - f"{'-'*w1} {'-'*w2}"] - body_lines += [f"{u:<{w1}} {v:<{w2}}" for (u, v) in rows] - print_block(f"DB: bound users for {iface}", "\n".join(body_lines)) - - return 0 - -# --- cli --------------------------------------------------------------------- - -def main(argv: list[str]) -> int: - if len(argv) != 1: - print(f"Usage: {Path(sys.argv[0]).name} ", file=sys.stderr) - return 2 - try: - return iface_status(argv[0]) - except (sqlite3.Error, FileNotFoundError, RuntimeError) as e: - print(f"❌ {e}", 
file=sys.stderr) - return 1 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/iface_up.sh b/developer/source/tunnel-client/iface_up.sh deleted file mode 100755 index e5dbd0a..0000000 --- a/developer/source/tunnel-client/iface_up.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash -# iface_up.sh — enable/start wg-quick@ -set -euo pipefail - -(( $# == 1 )) || { echo "Usage: $0 "; exit 2; } -IFACE="$1" - -# Require root because systemd + net ops -if [[ $EUID -ne 0 ]]; then - echo "❌ This script must be run as root." >&2 - exit 1 -fi - -# Sanity: config must exist -[[ -r "/etc/wireguard/${IFACE}.conf" ]] || { - echo "❌ Missing: /etc/wireguard/${IFACE}.conf"; exit 1; } - -# Bring it up -systemctl enable --now "wg-quick@${IFACE}" - -# Quick confirmation -systemctl is-active --quiet "wg-quick@${IFACE}" \ - && echo "✅ ${IFACE} is active." \ - || { echo "⚠️ ${IFACE} failed to start."; exit 1; } diff --git a/developer/source/tunnel-client/incommon.py b/developer/source/tunnel-client/incommon.py deleted file mode 100644 index a67a0aa..0000000 --- a/developer/source/tunnel-client/incommon.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python3 -# Shared helpers (DB path + small SQLite utilities). No side effects on import. 
- -from __future__ import annotations -from pathlib import Path -import sqlite3 -from typing import Iterable, Sequence, Any, List, Tuple, Optional - -# Base paths -ROOT_DIR: Path = Path(__file__).resolve().parent -DB_PATH: Path = ROOT_DIR / "db" / "store" # default location - -def open_db(path: Optional[Path]=None) -> sqlite3.Connection: - p = path or DB_PATH - if not p.exists(): - raise FileNotFoundError(f"DB not found: {p}") - conn = sqlite3.connect(p.as_posix()) - # enforce FK; journal mode is set by schema, but enabling FK here is harmless and desired - conn.execute("PRAGMA foreign_keys = ON;") - return conn - -def rows(conn: sqlite3.Connection, sql: str, params: Sequence[Any]=()) -> List[tuple]: - cur = conn.execute(sql, tuple(params)) - out = cur.fetchall() - cur.close() - return out - -def get_client_id(conn: sqlite3.Connection, iface: str) -> int: - r = conn.execute("SELECT id FROM Iface WHERE iface=? LIMIT 1;", (iface,)).fetchone() - if not r: raise RuntimeError(f"client iface not found: {iface}") - return int(r[0]) - -# Tx helpers (optional but nice) -def begin_immediate(conn: sqlite3.Connection) -> None: - conn.execute("BEGIN IMMEDIATE;") - -def commit(conn: sqlite3.Connection) -> None: - conn.commit() - diff --git a/developer/source/tunnel-client/inspect.sh b/developer/source/tunnel-client/inspect.sh deleted file mode 100755 index be2d5ef..0000000 --- a/developer/source/tunnel-client/inspect.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -# inspect.sh — prime sudo only if needed, then run inspect_1.py -set -euo pipefail -SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" - -# If not primed, prompt via the tty (works inside Emacs shell without echoing) -if ! 
sudo -n true 2>/dev/null; then - sudo echo -n -fi - -sudo python3 "${SCRIPT_DIR}/inspect_1.py" "$@" diff --git a/developer/source/tunnel-client/inspect_1.py b/developer/source/tunnel-client/inspect_1.py deleted file mode 100755 index e6a179a..0000000 --- a/developer/source/tunnel-client/inspect_1.py +++ /dev/null @@ -1,362 +0,0 @@ -#!/usr/bin/env python3 -# inspect.py — deep health: DB + systemd/drop-in + wg + route + uid rules + DNS plug - -from __future__ import annotations -import os, sys, re, time, shutil, sqlite3, subprocess -from pathlib import Path -from typing import List, Tuple, Optional -import incommon as ic # open_db() - -# ---------- small shell helpers ---------- - -def sh(args: List[str]) -> subprocess.CompletedProcess: - """Run command; never raise; text mode; capture stdout/stderr.""" - return subprocess.run(args, text=True, capture_output=True) - -def which(cmd: str) -> bool: - return shutil.which(cmd) is not None - -def print_block(title: str, body: str | None = None) -> None: - print(f"=== {title} ===") - if body: print(body.rstrip()) - print() - -def format_table(headers: List[str], rows: List[Tuple]) -> str: - cols = list(zip(*([headers] + [[str(c) for c in r] for r in rows]))) if rows else [headers] - widths = [max(len(x) for x in col) for col in cols] - line = lambda r: " ".join(f"{str(c):<{w}}" for c, w in zip(r, widths)) - out = [line(headers), line(tuple("-"*w for w in widths))] - for r in rows: out.append(line(tuple("" if c is None else str(c) for c in r))) - return "\n".join(out) - -# ---------- DB helpers ---------- - -def client_row(conn: sqlite3.Connection, iface: str): - return conn.execute(""" - SELECT c.iface, - v.rt_table_name_eff AS rt_table_name, - c.bound_user, c.bound_uid, - c.local_address_cidr, - substr(c.public_key,1,10)||'…' AS pub, - c.autostart, c.updated_at - FROM Iface c - JOIN v_client_effective v ON v.id=c.id - WHERE c.iface=? 
LIMIT 1; - """,(iface,)).fetchone() - -def server_rows(conn: sqlite3.Connection, iface: str) -> List[tuple]: - return conn.execute(""" - SELECT s.name, - s.endpoint_host || ':' || s.endpoint_port AS endpoint, - substr(s.public_key,1,10)||'…' AS pub, - s.allowed_ips, s.keepalive_s, s.priority - FROM server s - JOIN Iface c ON c.id=s.iface_id - WHERE c.iface=? - ORDER BY s.priority, s.name; - """,(iface,)).fetchall() - -def rtname_and_cidr(conn: sqlite3.Connection, iface: str) -> Tuple[str, str]: - row = conn.execute("SELECT rt_table_name_eff, local_address_cidr FROM v_client_effective WHERE iface=? LIMIT 1;",(iface,)).fetchone() - if not row: raise RuntimeError(f"Interface not found in DB: {iface}") - return str(row[0]), str(row[1]) - -def bound_uids(conn: sqlite3.Connection, iface: str) -> List[int]: - rows = conn.execute(""" - SELECT ub.uid - FROM User ub - JOIN Iface c ON c.id=ub.iface_id - WHERE c.iface=? AND ub.uid IS NOT NULL AND ub.uid!='' - ORDER BY ub.uid; - """,(iface,)).fetchall() - return [int(r[0]) for r in rows] - -def legacy_bound_uid(conn: sqlite3.Connection, iface: str) -> Optional[int]: - r = conn.execute("SELECT bound_uid FROM Iface WHERE iface=? AND bound_uid IS NOT NULL AND bound_uid!='';",(iface,)).fetchone() - return (int(r[0]) if r and r[0] is not None and str(r[0])!="" else None) - -def primary_server_ep_and_allowed(conn: sqlite3.Connection, iface: str) -> Tuple[str,str]: - ep = conn.execute(""" - SELECT s.endpoint_host||':'||s.endpoint_port - FROM server s JOIN Iface c ON c.id=s.iface_id - WHERE c.iface=? ORDER BY s.priority, s.name LIMIT 1; - """,(iface,)).fetchone() - allow = conn.execute(""" - SELECT s.allowed_ips - FROM server s JOIN Iface c ON c.id=s.iface_id - WHERE c.iface=? 
ORDER BY s.priority, s.name LIMIT 1; - """,(iface,)).fetchone() - return (str(ep[0]) if ep and ep[0] else ""), (str(allow[0]) if allow and allow[0] else "") - -# ---------- file checks ---------- - -def check_file(path: str, mode_oct: int, user: str, group: str) -> str: - p = Path(path) - if not p.exists(): return f"WARN: missing {path}" - try: - st = p.stat() - actual_mode = st.st_mode & 0o777 - import pwd, grp - u = pwd.getpwuid(st.st_uid).pw_name - g = grp.getgrgid(st.st_gid).gr_name - want = f"{oct(mode_oct)[2:]} {user} {group}" - got = f"{oct(actual_mode)[2:]} {u} {g}" - if actual_mode==mode_oct and u==user and g==group: - return f"OK: {path} ({got})" - else: - return f"WARN: {path} perms/owner {got} (expected {want})" - except Exception as e: - return f"WARN: {path} stat error: {e}" - -def rt_tables_has(table: str) -> bool: - try: - txt = Path("/etc/iproute2/rt_tables").read_text() - except Exception: - return False - pat = re.compile(rf"^\s*\d+\s+{re.escape(table)}\s*$", re.M) - return pat.search(txt) is not None - -# ---------- wg helpers ---------- - -def wg_present(iface: str) -> bool: - return Path(f"/sys/class/net/{iface}").exists() - -def wg_handshake_age_sec(iface: str) -> Optional[int]: - cp = sh(["sudo","-n","wg","show",iface,"latest-handshakes"]) - if cp.returncode != 0 or not cp.stdout.strip(): return None - try: - epoch = int(cp.stdout.split()[-1]) - if epoch<=0: return None - return int(time.time()) - epoch - except Exception: - return None - -def wg_endpoints_joined(iface: str) -> str: - cp = sh(["sudo","-n","wg","show",iface,"endpoints"]) - if cp.returncode != 0: return "" - vals = [] - for line in cp.stdout.splitlines(): - parts = line.split() - if len(parts)>=2: vals.append(parts[1]) - return "".join(vals) - -def wg_allowedips_csv(iface: str) -> str: - cp = sh(["sudo","-n","wg","show",iface,"allowed-ips"]) - if cp.returncode != 0: return "" - vals=[] - for line in cp.stdout.splitlines(): - parts = line.split() - if len(parts)>=2: 
vals.append(parts[1]) - return ",".join(vals) - -# ---------- redact helpers ---------- - -def redact_conf(text: str) -> str: - text = re.sub(r"^(PrivateKey\s*=\s*).+$", r"\1", text, flags=re.M) - text = re.sub(r"^(PresharedKey\s*=\s*).+$", r"\1", text, flags=re.M) - return text - -def sudo_cat(path: str) -> Optional[str]: - cp = sh(["sudo","-n","cat", path]) - if cp.returncode != 0: return None - return cp.stdout - -# ---------- main inspect ---------- - -def inspect_iface(iface: str) -> int: - # DB open - with ic.open_db() as conn: - crow = client_row(conn, iface) - if not crow: - print(f"❌ client row not found for iface={iface}", file=sys.stderr); return 1 - srv_rows = server_rows(conn, iface) - rtname, local_cidr = rtname_and_cidr(conn, iface) - local_ip = local_cidr.split("/",1)[0] - db_ep, db_allowed = primary_server_ep_and_allowed(conn, iface) - uids = bound_uids(conn, iface) - leg = legacy_bound_uid(conn, iface) - if leg is not None: uids.append(leg) - - # DB snapshot - print("=== DB: client '{}' ===".format(iface)) - headers = ["iface","rt_table_name","bound_user","bound_uid","local_address_cidr","pub","autostart","updated_at"] - print(format_table(headers, [crow])) - print() - print(f"--- server for '{iface}' ---") - if srv_rows: - print(format_table(["name","endpoint","pub","allowed_ips","keepalive_s","priority"], srv_rows)) - else: - print("(none)") - print() - - # systemd + drop-in - print(f"=== systemd: wg-quick@{iface} ===") - if which("systemctl"): - en = sh(["systemctl","is-enabled",f"wg-quick@{iface}"]).stdout.strip() - ac = sh(["systemctl","is-active", f"wg-quick@{iface}"]).stdout.strip() - if en: print(en) - if ac: print(ac) - drop_dir = f"/etc/systemd/system/wg-quick@{iface}.service.d" - # common filenames: legacy 'restart.conf' or new '10-postup-IP-scripts.conf' - candidates = [f"{drop_dir}/restart.conf", f"{drop_dir}/10-postup-IP-scripts.conf"] - print(f"-- drop-in expected: {candidates[0]}") - found = [p for p in candidates if 
Path(p).is_file()] - if found: - print("OK: drop-in file exists") - else: - print("WARN: drop-in file missing or unreadable") - dpaths = sh(["systemctl","show",f"wg-quick@{iface}","-p","DropInPaths","--value"]).stdout.strip() - if dpaths and any(p in dpaths for p in candidates): - print("OK: drop-in is loaded by systemd") - else: - print("WARN: drop-in not reported by systemd (need daemon-reload?)") - else: - print("(systemctl not available)") - print() - - # installed targets - print("=== installed targets ===") - print(check_file(f"/etc/wireguard/{iface}.conf", 0o600, "root", "root")) - # check both possible drop-in names - d1 = check_file(f"/etc/systemd/system/wg-quick@{iface}.service.d/restart.conf", 0o644, "root", "root") - d2 = check_file(f"/etc/systemd/system/wg-quick@{iface}.service.d/10-postup-IP-scripts.conf", 0o644, "root", "root") - # show OK if either exists - if d1.startswith("OK") or d2.startswith("OK"): - print(d1 if d1.startswith("OK") else d2) - else: - # print both warnings for clarity - print(d1); print(d2) - print(check_file("/usr/local/bin/IP_rule_add_UID.sh", 0o500, "root", "root")) - print(check_file(f"/usr/local/bin/route_init_{iface}.sh", 0o500, "root", "root")) - print("OK: rt_tables entry for '{}' present".format(rtname) if rt_tables_has(rtname) - else f"WARN: rt_tables entry for '{rtname}' missing") - print() - - # wg + addr - print(f"=== wg + addr: {iface} ===") - present = wg_present(iface) - print("(present)" if present else "(interface down or not present)") - if present: - has_ip = sh(["ip","-4","addr","show","dev",iface]).stdout.find(f" {local_ip}/")>=0 - print(f"OK: {iface} has {local_ip}" if has_ip else f"WARN: {iface} missing {local_ip}") - if which("wg"): - age = wg_handshake_age_sec(iface) - if age is None: - print("latest-handshake: none") - else: - print(f"latest-handshake: {age}s ago") - if age>600: print("WARN: handshake is stale (>600s)") - # endpoint and allowed-ips comparison (requires sudo) - wg_ep = 
wg_endpoints_joined(iface) - if db_ep: - if wg_ep == db_ep: - print(f"OK: endpoint matches DB ({wg_ep})") - else: - print(f"WARN: endpoint mismatch (wg={wg_ep or 'n/a'} db={db_ep})") - wg_allowed = wg_allowedips_csv(iface) - if db_allowed: - if wg_allowed == db_allowed: - print(f"OK: allowed-ips match DB ({wg_allowed})") - else: - print(f"WARN: allowed-ips mismatch (wg={wg_allowed or 'n/a'} db={db_allowed})") - else: - prog = Path(sys.argv[0]).name - print(f"⚠ need sudo for handshake/peer checks (try: sudo {prog} {iface})") - print() - - # route table checks - print(f"=== route: table {rtname} ===") - rt = sh(["ip","-4","route","show","table",rtname]).stdout - print(rt or "") - def_ok = any(re.match(rf"^default\s+dev\s+{re.escape(iface)}\b", ln) for ln in rt.splitlines()) - bh_ok = any(re.match(r"^blackhole\s+default\b", ln) for ln in rt.splitlines()) - print("OK: default -> {}".format(iface) if def_ok else f"WARN: default route not on {iface}") - print("OK: blackhole guard present" if bh_ok else "WARN: blackhole guard missing") - print() - - # uid rules - print(f"=== ip rules for bound UIDs → table {rtname} ===") - rules_txt = sh(["ip","-4","rule","show"]).stdout - if uids: - for u in uids: - if re.search(rf"uidrange {u}-{u}.*lookup {re.escape(rtname)}", rules_txt): - print(f"OK: uid {u} -> table {rtname}") - else: - print(f"WARN: missing rule for uid {u} -> table {rtname}") - else: - print("(no bound UIDs recorded)") - print() - print(f"=== ip rule lines targeting '{rtname}' (all) ===") - hit_lines = [ln for ln in rules_txt.splitlines() if f"lookup {rtname}" in ln] - print("\n".join(hit_lines) if hit_lines else "(none)") - print() - - # DNS leak plug: iptables redirects - print("=== iptables nat OUTPUT DNS redirect (→ 127.0.0.1:53) ===") - if which("iptables"): - nat = sh(["iptables","-t","nat","-S","OUTPUT"]).stdout - r_udp = re.search(r"-A OUTPUT.*-p udp .* --dport 53 .* REDIRECT .*to-ports 53", nat or "") - r_tcp = re.search(r"-A OUTPUT.*-p tcp .* --dport 53 
.* REDIRECT .*to-ports 53", nat or "") - print(r_udp.group(0) if r_udp else "WARN: no UDP:53 redirect") - print(r_tcp.group(0) if r_tcp else "WARN: no TCP:53 redirect") - else: - print("(iptables not available)") - print() - - # on-disk configs (redacted) - conf = f"/etc/wireguard/{iface}.conf" - drop_restart = f"/etc/systemd/system/wg-quick@{iface}.service.d/restart.conf" - drop_postup = f"/etc/systemd/system/wg-quick@{iface}.service.d/10-postup-IP-scripts.conf" - - print(f"=== file: {conf} (redacted) ===") - txt = sudo_cat(conf) - if txt is None: - print("(missing or unreadable; need sudo to view)") - else: - print(redact_conf(txt)) - print() - - pick_drop = drop_restart if Path(drop_restart).exists() else drop_postup - print(f"=== file: {pick_drop} (hooks) ===") - txt = sudo_cat(pick_drop) - if txt is None: - print("(missing or unreadable; need sudo to view)") - else: - # Show only interesting service lines if present - lines = [ln for ln in txt.splitlines() - if ln.startswith(("ExecStart","Restart","RestartSec","ExecStartPre","ExecStartPost"))] - print("\n".join(lines) if lines else txt) - print() - - # summary verdict - print("=== summary ===") - ok = True - ok &= def_ok - ok &= bh_ok - if uids: - for u in uids: - if not re.search(rf"uidrange {u}-{u}.*lookup {re.escape(rtname)}", rules_txt): ok = False - ok &= rt_tables_has(rtname) - ok &= Path(f"/etc/wireguard/{iface}.conf").exists() - ok &= (Path(drop_restart).exists() or Path(drop_postup).exists()) - ok &= wg_present(iface) - if db_ep and which("wg"): - # If wg is present and sudo works, compare endpoint; otherwise skip - wg_ep = wg_endpoints_joined(iface) - if wg_ep and wg_ep != db_ep: ok = False - print("✅ Looks consistent for '{}'.".format(iface) if ok else "⚠️ Something is off — check WARN lines above.") - return 0 if ok else 1 - -# ---------- cli ---------- - -def main(argv: List[str]) -> int: - if len(argv)!=1: - print(f"Usage: {Path(sys.argv[0]).name} ", file=sys.stderr) - return 2 - try: - return 
inspect_iface(argv[0]) - except (sqlite3.Error, FileNotFoundError, RuntimeError) as e: - print(f"❌ {e}", file=sys.stderr); return 1 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/inspect_client_public_key.py b/developer/source/tunnel-client/inspect_client_public_key.py deleted file mode 100755 index 95a3803..0000000 --- a/developer/source/tunnel-client/inspect_client_public_key.py +++ /dev/null @@ -1,217 +0,0 @@ -#!/usr/bin/env python3 -# inspect_client_public_key.py — show the client's WireGuard public key for one iface -# Sources checked (in this order): DB, staged conf, installed conf, kernel -# The “client public key” is generated locally from the client’s PrivateKey and must be -# copied to the **server** as the peer’s PublicKey in the server’s WireGuard config. - -from __future__ import annotations -from pathlib import Path -from typing import List, Optional, Tuple -import argparse -import os -import subprocess -import sqlite3 -import sys - -# Project helper providing DB_PATH and open_db() -import incommon as ic - -ROOT = Path(__file__).resolve().parent -DEFAULT_STAGE = ROOT / "stage" -LIVE_WG_DIR = Path("/etc/wireguard") - -def _is_root() -> bool: - return os.geteuid() == 0 - -def _format_table(headers: List[str], rows: List[Tuple]) -> str: - if not rows: - return "(none)" - cols = list(zip(*([headers] + [[("" if c is None else str(c)) for c in r] for r in rows]))) - widths = [max(len(x) for x in col) for col in cols] - def line(r): return " ".join(f"{str(c):<{w}}" for c, w in zip(r, widths)) - out = [line(headers), line(tuple("-"*w for w in widths))] - for r in rows: - out.append(line(r)) - return "\n".join(out) - -def _read_conf_private_key(conf_path: Path) -> Optional[str]: - """Return the PrivateKey value from a wg conf (first [Interface] block), or None.""" - try: - txt = conf_path.read_text() - except FileNotFoundError: - return None - section = None - for raw in txt.splitlines(): - line = 
raw.strip() - if not line or line.startswith("#") or line.startswith(";"): - continue - if line.startswith("[") and line.endswith("]"): - section = line[1:-1].strip() - continue - if section == "Interface": - if line.lower().startswith("privatekey"): - parts = line.split("=", 1) - if len(parts) == 2: - val = parts[1].strip() - return val if val else None - return None - -def _pub_from_private_key(priv: str) -> Optional[str]: - """Compute public key from a WireGuard base64 private key using `wg pubkey`.""" - if not priv: - return None - try: - cp = subprocess.run( - ["wg", "pubkey"], - input=(priv + "\n").encode("utf-8"), - stdout=subprocess.PIPE, - stderr=subprocess.DEVNULL, - check=True, - ) - pub = cp.stdout.decode("utf-8", "replace").strip() - return pub or None - except (subprocess.CalledProcessError, FileNotFoundError): - return None - -def _kernel_iface_public_key(iface: str) -> Optional[str]: - try: - cp = subprocess.run( - ["wg", "show", iface, "public-key"], - stdout=subprocess.PIPE, - stderr=subprocess.DEVNULL, - check=True, - ) - k = cp.stdout.decode("utf-8", "replace").strip() - return k or None - except (subprocess.CalledProcessError, FileNotFoundError): - return None - -def _db_client_public_key(conn: sqlite3.Connection, iface: str) -> Optional[str]: - row = conn.execute("SELECT public_key FROM Iface WHERE iface=? 
LIMIT 1;", (iface,)).fetchone() - if not row: - return None - k = row[0] - return k if k else None - -def _rel_from_stage(path: Path, stage_root: Path) -> str: - """Return a short, stage-relative display path when under stage_root.""" - try: - rel = path.relative_to(stage_root) - return str(rel) - except ValueError: - return str(path) - -def _gather(iface: str, stage_root: Path) -> Tuple[List[Tuple[str, str, str]], List[str]]: - """ - Return (rows, notes) - rows: list of (source, location, public_key or "(missing)") - """ - notes: List[str] = [] - - # DB - db_pub: Optional[str] = None - if ic.DB_PATH.exists(): - try: - with ic.open_db() as conn: - db_pub = _db_client_public_key(conn, iface) - except sqlite3.Error as e: - notes.append(f"DB error: {e}") - else: - notes.append(f"DB not found at {ic.DB_PATH}") - - # staged conf -> derive pub from PrivateKey - staged_conf = stage_root / "etc" / "wireguard" / f"{iface}.conf" - staged_priv = _read_conf_private_key(staged_conf) - staged_pub = _pub_from_private_key(staged_priv) if staged_priv else None - if staged_priv is None and staged_conf.exists(): - notes.append(f"staged conf present but PrivateKey missing: { _rel_from_stage(staged_conf, stage_root) }") - - # live conf -> derive pub from PrivateKey - live_conf = LIVE_WG_DIR / f"{iface}.conf" - live_priv = _read_conf_private_key(live_conf) - live_pub = _pub_from_private_key(live_priv) if live_priv else None - if live_conf.exists() and live_priv is None: - notes.append(f"installed conf present but PrivateKey missing: {live_conf}") - - # kernel - kern_pub = _kernel_iface_public_key(iface) - - rows: List[Tuple[str, str, str]] = [] - rows.append(("DB", f"Iface.public_key[{iface}]", db_pub or "(missing)")) - rows.append(("Stage", _rel_from_stage(staged_conf, stage_root), - staged_pub or ("(missing)" if not staged_conf.exists() else "(could not derive)"))) - rows.append(("Installed", str(live_conf), - live_pub or ("(missing)" if not live_conf.exists() else "(could not 
derive)"))) - rows.append(("Kernel", f"wg show {iface} public-key", kern_pub or "(missing)")) - - # Quick consistency summary - present = [v for _s, _loc, v in rows if not v.startswith("(")] - if len(present) >= 2: - all_same = all(v == present[0] for v in present[1:]) - if all_same: - notes.append("All present sources agree.") - else: - notes.append("Mismatch detected between sources.") - elif len(present) == 1: - notes.append("Only one source has a key (cannot check consistency).") - else: - notes.append("No source has a client public key.") - - return (rows, notes) - -def inspect_client_public_key(iface: str, stage_root: Optional[Path] = None) -> str: - """ - Business function: returns a formatted report string. - """ - sr = stage_root or DEFAULT_STAGE - rows, notes = _gather(iface, sr) - - header = ( - f"Client public key inspection for iface '{iface}'\n" - "This public key is generated locally from the client’s PrivateKey and must be\n" - "installed on the *server* as the peer’s PublicKey in the server’s WireGuard config.\n" - ) - table = _format_table(["source", "where", "public_key"], rows) - if notes: - note_block = "\nNotes:\n- " + "\n- ".join(notes) - else: - note_block = "" - return f"{header}\n{table}\n{note_block}\n" - -def main(argv: Optional[List[str]] = None) -> int: - ap = argparse.ArgumentParser( - description="Inspect the client’s WireGuard public key for a single interface." 
- ) - # Make iface optional so we can aggregate errors ourselves - ap.add_argument("iface", nargs="?", help="interface name (e.g., x6)") - ap.add_argument("--stage-root", default=str(DEFAULT_STAGE), help="stage directory (default: ./stage)") - args = ap.parse_args(argv) - - # Aggregate invocation errors - errors: List[str] = [] - if not _is_root(): - errors.append("must run as root (needs access to /etc/wireguard and wg)") - if not args.iface: - errors.append("missing required positional argument: iface") - if args.stage_root: - sr = Path(args.stage_root) - if not sr.exists(): - errors.append(f"--stage-root does not exist: {sr}") - elif not sr.is_dir(): - errors.append(f"--stage-root is not a directory: {sr}") - - if errors: - ap.print_usage(sys.stderr) - print(f"{ap.prog}: error: " + "; ".join(errors), file=sys.stderr) - return 2 - - try: - report = inspect_client_public_key(args.iface, Path(args.stage_root)) - print(report, end="") - return 0 - except Exception as e: - print(f"❌ {e}", file=sys.stderr) - return 2 - -if __name__ == "__main__": - sys.exit(main()) diff --git a/developer/source/tunnel-client/install_staged_tree.py b/developer/source/tunnel-client/install_staged_tree.py deleted file mode 100755 index e1225d5..0000000 --- a/developer/source/tunnel-client/install_staged_tree.py +++ /dev/null @@ -1,245 +0,0 @@ -#!/usr/bin/env python3 -""" -install_staged_tree.py - -A dumb installer: copy staged files into the target root with backups and -deterministic permissions. No systemd stop/start, no daemon-reload. - -Given: - - A staged tree (default: ./stage) containing any of: - /usr/local/bin/apply_ip_state.sh - /etc/wireguard/*.conf - /etc/systemd/system/wg-quick@IFACE.service.d/*.conf - /etc/iproute2/rt_tables - - A destination root (default: /). Parent dirs may be created with --create-dirs. 
- -Does: - - For each whitelisted staged file: - * if a target already exists, copy it back into the stage as a timestamped backup - * atomically replace target with staged version - * set root:root ownership (best-effort) and explicit permissions - - Prints a summary and suggests next steps (e.g., ./start_iface.py ) - -Returns: - - Exit 0 on success; non-zero on error -""" - -from __future__ import annotations -from pathlib import Path -from typing import Dict, Iterable, List, Optional, Sequence, Tuple -import argparse -import datetime as dt -import hashlib -import os -import shutil -import sys - -ROOT = Path(__file__).resolve().parent -DEFAULT_STAGE = ROOT / "stage" - -# Whitelisted install targets → mode -# (These are *relative* to the stage root) -MODE_RULES: List[Tuple[str, int]] = [ - ("usr/local/bin", 0o500), # files under here (scripts) - ("etc/wireguard", 0o600), # *.conf - ("etc/systemd/system", 0o644), # wg-quick@*.service.d/*.conf - ("etc/iproute2", 0o644), # rt_tables -] - -def _sha256(path: Path) -> str: - h = hashlib.sha256() - with path.open("rb") as f: - for chunk in iter(lambda: f.read(1<<20), b""): - h.update(chunk) - return h.hexdigest() - -def _ensure_parents(dest_root: Path, rel: Path, create: bool) -> None: - parent = (dest_root / rel).parent - if parent.exists(): - return - if not create: - raise RuntimeError(f"missing parent directory: {parent}") - parent.mkdir(parents=True, exist_ok=True) - -def _backup_existing_to_stage(stage_root: Path, dest_root: Path, rel: Path) -> Optional[Path]: - """If target exists, copy it back into stage/_backups// and return backup path.""" - target = dest_root / rel - if not target.exists(): - return None - ts = dt.datetime.utcnow().strftime("%Y%m%dT%H%M%SZ") - backup = stage_root / "_backups" / ts / rel - backup.parent.mkdir(parents=True, exist_ok=True) - shutil.copy2(target, backup) - return backup - -def _atomic_install(src: Path, dst: Path, mode: int) -> None: - tmp = dst.with_suffix(dst.suffix + ".tmp") - 
shutil.copyfile(src, tmp) - os.chmod(tmp, mode) - try: - os.chown(tmp, 0, 0) # best-effort; may fail if not root - except PermissionError: - pass - os.replace(tmp, dst) - -def _mode_for_rel(rel: Path) -> Optional[int]: - """Choose a mode based on the relative path bucket.""" - s = str(rel) - if s.startswith("usr/local/bin/"): - return 0o500 - if s.startswith("etc/wireguard/") and rel.suffix == ".conf": - return 0o600 - if s == "etc/iproute2/rt_tables": - return 0o644 - if s.startswith("etc/systemd/system/") and s.endswith(".conf"): - return 0o644 - return None - -def _iter_stage_targets(stage_root: Path) -> List[Path]: - """Return a list of *relative* paths under stage that match our whitelist.""" - rels: List[Path] = [] - - # /usr/local/bin/* - bin_dir = stage_root / "usr" / "local" / "bin" - if bin_dir.is_dir(): - for p in sorted(bin_dir.glob("*")): - if p.is_file(): - rels.append(p.relative_to(stage_root)) - - # /etc/wireguard/*.conf - wg_dir = stage_root / "etc" / "wireguard" - if wg_dir.is_dir(): - for p in sorted(wg_dir.glob("*.conf")): - rels.append(p.relative_to(stage_root)) - - # /etc/systemd/system/wg-quick@*.service.d/*.conf - sysd_dir = stage_root / "etc" / "systemd" / "system" - if sysd_dir.is_dir(): - for p in sorted(sysd_dir.rglob("wg-quick@*.service.d/*.conf")): - rels.append(p.relative_to(stage_root)) - - # /etc/iproute2/rt_tables - rt = stage_root / "etc" / "iproute2" / "rt_tables" - if rt.is_file(): - rels.append(rt.relative_to(stage_root)) - - return rels - -def _discover_ifaces_from_stage(stage_root: Path) -> List[str]: - """Peek into staged artifacts to guess iface names (for friendly next-steps).""" - names = set() - - # from /etc/wireguard/.conf - wg_dir = stage_root / "etc" / "wireguard" - if wg_dir.is_dir(): - for p in wg_dir.glob("*.conf"): - names.add(p.stem) - - # from /etc/systemd/system/wg-quick@.service.d/ - sysd = stage_root / "etc" / "systemd" / "system" - if sysd.is_dir(): - for d in sysd.glob("wg-quick@*.service.d"): - name = 
d.name - # name looks like: wg-quick@X.service.d - at = name.find("@") - dot = name.find(".service.d") - if at != -1 and dot != -1 and dot > at: - names.add(name[at+1:dot]) - - return sorted(names) - -def install_staged_tree( - stage_root: Path, - dest_root: Path, - create_dirs: bool = False, - skip_identical: bool = True, -) -> Tuple[List[str], List[str]]: - """ - Copy files from stage_root to dest_root. - Returns (logs, detected_ifaces). - """ - old_umask = os.umask(0o077) - logs: List[str] = [] - try: - staged = _iter_stage_targets(stage_root) - if not staged: - raise RuntimeError("nothing to install (stage is empty or whitelist didn’t match)") - - for rel in staged: - src = stage_root / rel - dst = dest_root / rel - - mode = _mode_for_rel(rel) - if mode is None: - logs.append(f"skip (not whitelisted): {rel}") - continue - - _ensure_parents(dest_root, rel, create_dirs) - - backup = _backup_existing_to_stage(stage_root, dest_root, rel) - if backup: - logs.append(f"backup: {dst} -> {backup}") - - if skip_identical and dst.exists(): - try: - if _sha256(src) == _sha256(dst): - logs.append(f"identical: skip {rel}") - continue - except Exception: - pass - - _atomic_install(src, dst, mode) - logs.append(f"install: {rel} (mode {oct(mode)})") - - ifaces = _discover_ifaces_from_stage(stage_root) - return (logs, ifaces) - finally: - os.umask(old_umask) - -def _require_root(allow_nonroot: bool) -> None: - if not allow_nonroot and os.geteuid() != 0: - raise RuntimeError("must run as root (use --force-nonroot to override)") - -def main(argv: Optional[Sequence[str]] = None) -> int: - ap = argparse.ArgumentParser(description="Install staged artifacts into a target root. 
No service control.") - ap.add_argument("--stage", default=str(DEFAULT_STAGE)) - ap.add_argument("--root", default="/") - ap.add_argument("--create-dirs", action="store_true", help="create missing parent directories") - ap.add_argument("--no-skip-identical", action="store_true", help="always replace even if content identical") - ap.add_argument("--force-nonroot", action="store_true", help="allow non-root install (ownership may be wrong)") - args = ap.parse_args(argv) - - try: - _require_root(allow_nonroot=args.force_nonroot) - logs, ifaces = install_staged_tree( - stage_root=Path(args.stage), - dest_root=Path(args.root), - create_dirs=args.create_dirs, - skip_identical=(not args.no_skip_identical), - ) - for line in logs: - print(line) - - # Summary + suggested next steps - print("\n=== Summary ===") - print(f"Installed {sum(1 for l in logs if l.startswith('install:'))} file(s).") - if ifaces: - lst = " ".join(ifaces) - print(f"Detected interfaces from stage: {lst}") - print(f"\nNext steps:") - print(f" # (optional) verify configs") - print(f" sudo wg-quick strip /etc/wireguard/{ifaces[0]}.conf >/dev/null 2>&1 || true") - print(f"\n # start interfaces") - print(f" sudo ./start_iface.py {lst}") - else: - print("No interfaces detected in staged artifacts.") - print("\nNext steps:") - print(" # start your interface(s)") - print(" sudo ./start_iface.py [more ifaces]") - return 0 - except Exception as e: - print(f"❌ install failed: {e}", file=sys.stderr) - return 2 - -if __name__ == "__main__": - sys.exit(main()) diff --git a/developer/source/tunnel-client/key/.gitignore b/developer/source/tunnel-client/key/.gitignore deleted file mode 100644 index 53642ce..0000000 --- a/developer/source/tunnel-client/key/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ - -* -!.gitignore - diff --git a/developer/source/tunnel-client/key_client_generate.py b/developer/source/tunnel-client/key_client_generate.py deleted file mode 100755 index 96df023..0000000 --- 
a/developer/source/tunnel-client/key_client_generate.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python3 -# key_client_generate.py — generate a machine-wide WG keypair -# Usage: ./key_client_generate.py -# - Writes private key to: key/ -# - Updates ALL client.public_key in local DB (no private key stored in DB) - -from __future__ import annotations -import sys, shutil, subprocess, sqlite3, os -from pathlib import Path -import incommon as ic # ROOT_DIR, DB_PATH, open_db() - -def generate_keypair() -> tuple[str, str]: - if not shutil.which("wg"): - raise RuntimeError("wg not found; install wireguard-tools") - priv = subprocess.run(["wg","genkey"], check=True, text=True, capture_output=True).stdout.strip() - pub = subprocess.run(["wg","pubkey"], check=True, input=priv.encode(), capture_output=True).stdout.decode().strip() - # quick sanity - if not (43 <= len(pub) <= 45): - raise RuntimeError(f"generated public key length looks wrong ({len(pub)})") - return priv, pub - -def write_private_key(machine: str, private_key: str) -> Path: - key_dir = ic.ROOT_DIR / "key" - key_dir.mkdir(parents=True, exist_ok=True) - out_path = key_dir / machine - if out_path.exists(): - raise FileExistsError(f"refusing to overwrite existing private key file: {out_path}") - with open(out_path, "w", encoding="utf-8") as f: - f.write(private_key + "\n") - os.chmod(out_path, 0o600) - return out_path - -def update_client_public_keys(pub: str) -> int: - if not ic.DB_PATH.exists(): - raise FileNotFoundError(f"DB not found: {ic.DB_PATH}") - with ic.open_db() as conn: - cur = conn.execute( - "UPDATE Iface " - " SET public_key=?, updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now');", - (pub,) - ) - conn.commit() - return cur.rowcount or 0 - -def main(argv: list[str]) -> int: - if len(argv) != 1: - print(f"Usage: {Path(sys.argv[0]).name} ", file=sys.stderr) - return 2 - machine = argv[0] - try: - priv, pub = generate_keypair() - out_path = write_private_key(machine, priv) - n = 
update_client_public_keys(pub) - print(f"wrote: {out_path.relative_to(ic.ROOT_DIR)} (600)") - print(f"updated client.public_key for {n} row(s)") - print(f"public_key: {pub}") - return 0 - except (RuntimeError, FileExistsError, FileNotFoundError, sqlite3.Error, subprocess.CalledProcessError) as e: - print(f"❌ {e}", file=sys.stderr) - return 1 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/key_server_set.py b/developer/source/tunnel-client/key_server_set.py deleted file mode 100755 index f53022e..0000000 --- a/developer/source/tunnel-client/key_server_set.py +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env python3 -# key_server_set.py — set a server's public key by nickname -# Usage: ./key_server_set.py - -from __future__ import annotations -import sys, sqlite3 -from pathlib import Path -import incommon as ic # DB_PATH, open_db() - -def valid_pub(pub: str) -> bool: - # wg public keys are base64-like and typically 44 chars; allow 43–45 as used elsewhere - return isinstance(pub, str) and (43 <= len(pub.strip()) <= 45) - -def set_server_pubkey(server_name: str, pubkey: str) -> int: - if not ic.DB_PATH.exists(): - raise FileNotFoundError(f"DB not found: {ic.DB_PATH}") - with ic.open_db() as conn: - cur = conn.execute( - "UPDATE server " - " SET public_key=?, updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') " - " WHERE name=?;", - (pubkey.strip(), server_name) - ) - conn.commit() - return cur.rowcount or 0 - -def main(argv: list[str]) -> int: - if len(argv) != 2: - print(f"Usage: {Path(sys.argv[0]).name} ", file=sys.stderr) - return 2 - name, pub = argv - if not valid_pub(pub): - print(f"❌ public_key length looks wrong ({len(pub)})", file=sys.stderr) - return 1 - try: - n = set_server_pubkey(name, pub) - if n == 0: - print(f"⚠️ no matching server rows for name='{name}'") - else: - print(f"updated server.public_key for {n} row(s) where name='{name}'") - return 0 - except (sqlite3.Error, FileNotFoundError) as e: - 
print(f"❌ {e}", file=sys.stderr) - return 1 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/ls_iface.py b/developer/source/tunnel-client/ls_iface.py deleted file mode 100755 index e9454f0..0000000 --- a/developer/source/tunnel-client/ls_iface.py +++ /dev/null @@ -1,89 +0,0 @@ -#!/usr/bin/env python3 -""" -ls_client.py — list client from the DB - -Default output: interface names, one per line. - -Options: - -i, --iface IFACE Filter to a single interface (exact match) - -l, --long Show a table with iface, rt_table_name, rt_table_id, addr, autostart, updated_at - -h, --help Show usage -""" - -from __future__ import annotations -import sys -import argparse -import sqlite3 -from typing import List, Tuple -import incommon as ic # DB_PATH, open_db() - -def parse_args(argv: List[str]) -> argparse.Namespace: - ap = argparse.ArgumentParser(add_help=False, prog="ls_client.py", description="List client from the DB") - ap.add_argument("-i","--iface", help="Filter by interface (exact match)") - ap.add_argument("-l","--long", action="store_true", help="Long table output") - ap.add_argument("-h","--help", action="help", help="Show this help and exit") - return ap.parse_args(argv) - -def fmt_table(headers: List[str], rows: List[Tuple]) -> str: - if not rows: return "" - # normalize to strings; keep empty for None - rows = [[("" if c is None else str(c)) for c in r] for r in rows] - cols = list(zip(*([headers] + rows))) - widths = [max(len(x) for x in col) for col in cols] - line = lambda r: " ".join(f"{str(c):<{w}}" for c, w in zip(r, widths)) - out = [line(headers), line(tuple("-"*w for w in widths))] - out += [line(r) for r in rows] - return "\n".join(out) - -def list_names(conn: sqlite3.Connection, iface: str | None) -> int: - if iface: - rows = conn.execute("SELECT iface FROM Iface WHERE iface=? 
ORDER BY iface;", (iface,)).fetchall() - else: - rows = conn.execute("SELECT iface FROM Iface ORDER BY iface;").fetchall() - for (name,) in rows: - print(name) - return 0 - -def list_long(conn: sqlite3.Connection, iface: str | None) -> int: - if iface: - rows = conn.execute(""" - SELECT c.iface, - v.rt_table_name_eff AS rt_table_name, - COALESCE(c.rt_table_id,'') AS rt_table_id, - c.local_address_cidr, - c.autostart, - c.updated_at - FROM Iface c - JOIN v_client_effective v ON v.id = c.id - WHERE c.iface = ? - ORDER BY c.iface; - """, (iface,)).fetchall() - else: - rows = conn.execute(""" - SELECT c.iface, - v.rt_table_name_eff AS rt_table_name, - COALESCE(c.rt_table_id,'') AS rt_table_id, - c.local_address_cidr, - c.autostart, - c.updated_at - FROM Iface c - JOIN v_client_effective v ON v.id = c.id - ORDER BY c.iface; - """).fetchall() - - hdr = ["iface","rt_table_name","rt_table_id","addr","autostart","updated_at"] - txt = fmt_table(hdr, rows) - if txt: print(txt) - return 0 - -def main(argv: List[str]) -> int: - args = parse_args(argv) - try: - with ic.open_db() as conn: - return list_long(conn, args.iface) if args.long else list_names(conn, args.iface) - except (sqlite3.Error, FileNotFoundError) as e: - print(f"❌ {e}", file=sys.stderr) - return 2 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/ls_key.py b/developer/source/tunnel-client/ls_key.py deleted file mode 100755 index 535c7c9..0000000 --- a/developer/source/tunnel-client/ls_key.py +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env python3 -# ls_keys.py — list WireGuard public keys only -# Usage: -# ./ls_keys.py # all client/server -# ./ls_keys.py -i x6 # only iface x6 - -from __future__ import annotations -import sys, argparse, sqlite3 -from pathlib import Path -from typing import List, Tuple -import incommon as ic # DB_PATH, open_db() - -def format_table(headers: List[str], rows: List[Tuple]) -> str: - if not rows: - return "(none)" - cols = 
list(zip(*([headers] + [[("" if c is None else str(c)) for c in r] for r in rows]))) - widths = [max(len(x) for x in col) for col in cols] - def line(r): return " ".join(f"{str(c):<{w}}" for c, w in zip(r, widths)) - out = [line(headers), line(tuple("-"*w for w in widths))] - for r in rows: out.append(line(r)) - return "\n".join(out) - -def list_client_keys(conn: sqlite3.Connection, iface: str | None, banner=False) -> str: - if banner: - print("\n=== Public keys generated locally by client, probably by using `key_client_generate.py`===") - rows = conn.execute( - "SELECT iface, public_key AS client_public_key " - "FROM Iface " - + ("WHERE iface=? " if iface else "") - + "ORDER BY iface;", - ((iface,) if iface else tuple()), - ).fetchall() - return format_table(["iface","client_public_key"], rows) - -def list_server_keys(conn: sqlite3.Connection, iface: str | None ,banner=False) -> str: - if banner: - print("\n=== Public keys imported from remote server, probably edited into db_init_server_.py ===") - rows = conn.execute( - "SELECT c.iface AS client, s.name AS server, s.public_key AS server_public_key " - "FROM server s JOIN Iface c ON c.id = s.iface_id " - + ("WHERE c.iface=? " if iface else "") - + "ORDER BY c.iface, s.name;", - ((iface,) if iface else tuple()), - ).fetchall() - return format_table(["client","server","server_public_key"], rows) - -def client_pub_for_iface(conn: sqlite3.Connection, iface: str) -> str | None: - r = conn.execute("SELECT public_key FROM Iface WHERE iface=? 
LIMIT 1;", (iface,)).fetchone() - return (r[0] if r and r[0] else None) - -def main(argv: List[str]) -> int: - ap = argparse.ArgumentParser(description="List WireGuard public keys from the local DB.") - ap.add_argument("-i","--iface", help="filter for one iface (e.g., x6)") - args = ap.parse_args(argv) - - try: - # Ensure DB exists - if not ic.DB_PATH.exists(): - print(f"❌ DB not found: {ic.DB_PATH}", file=sys.stderr) - return 1 - with ic.open_db() as conn: - print(list_client_keys(conn, args.iface, banner=True)) - print() - print(list_server_keys(conn, args.iface, banner=True)) - if args.iface: - cpub = client_pub_for_iface(conn, args.iface) - if cpub: - print() - print("# Copy to server peer config if needed:") - print(f'CLIENT_PUB="{cpub}"') - return 0 - except (sqlite3.Error, FileNotFoundError) as e: - print(f"❌ {e}", file=sys.stderr) - return 1 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/ls_server.py b/developer/source/tunnel-client/ls_server.py deleted file mode 100755 index e1ee92d..0000000 --- a/developer/source/tunnel-client/ls_server.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python3 -""" -ls_server.py — list server from the DB - -Default output: server names, one per line. 
- -Options: - -i, --iface IFACE Filter to a single client interface (e.g., x6, US) - -l, --long Show a table with client, name, endpoint, allowed_ips, priority - -h, --help Show usage -""" - -from __future__ import annotations -import sys -import sqlite3 -import argparse -from typing import List, Tuple -import incommon as ic # DB_PATH, open_db() - -def parse_args(argv: List[str]) -> argparse.Namespace: - ap = argparse.ArgumentParser(add_help=False, prog="ls_server.py", description="List server from the DB") - ap.add_argument("-i","--iface", help="Filter by client interface") - ap.add_argument("-l","--long", action="store_true", help="Long table output") - ap.add_argument("-h","--help", action="help", help="Show this help and exit") - return ap.parse_args(argv) - -def fmt_table(headers: List[str], rows: List[Tuple]) -> str: - if not rows: return "" - cols = list(zip(*([headers] + [[("" if c is None else str(c)) for c in r] for r in rows]))) - widths = [max(len(x) for x in col) for col in cols] - line = lambda r: " ".join(f"{str(c):<{w}}" for c, w in zip(r, widths)) - out = [line(headers), line(tuple("-"*w for w in widths))] - for r in rows: out.append(line(r)) - return "\n".join(out) - -def list_names(conn: sqlite3.Connection, iface: str | None) -> int: - if iface: - rows = conn.execute(""" - SELECT s.name - FROM server s - JOIN Iface c ON c.id = s.iface_id - WHERE c.iface = ? - ORDER BY s.name - """, (iface,)).fetchall() - else: - rows = conn.execute("SELECT name FROM server ORDER BY name").fetchall() - for (name,) in rows: - print(name) - return 0 - -def list_long(conn: sqlite3.Connection, iface: str | None) -> int: - if iface: - rows = conn.execute(""" - SELECT c.iface, - s.name, - s.endpoint_host || ':' || CAST(s.endpoint_port AS TEXT) AS endpoint, - s.allowed_ips, - s.priority - FROM server s - JOIN Iface c ON c.id = s.iface_id - WHERE c.iface = ? 
- ORDER BY c.iface, s.priority, s.name - """, (iface,)).fetchall() - else: - rows = conn.execute(""" - SELECT c.iface, - s.name, - s.endpoint_host || ':' || CAST(s.endpoint_port AS TEXT) AS endpoint, - s.allowed_ips, - s.priority - FROM server s - JOIN Iface c ON c.id = s.iface_id - ORDER BY c.iface, s.priority, s.name - """).fetchall() - - hdr = ["client","name","endpoint","allowed_ips","priority"] - txt = fmt_table(hdr, rows) - if txt: print(txt) - return 0 - -def main(argv: List[str]) -> int: - args = parse_args(argv) - try: - with ic.open_db() as conn: - return list_long(conn, args.iface) if args.long else list_names(conn, args.iface) - except (sqlite3.Error, FileNotFoundError) as e: - print(f"❌ {e}", file=sys.stderr) - return 2 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/ls_server_setting.py b/developer/source/tunnel-client/ls_server_setting.py deleted file mode 100755 index 594cd70..0000000 --- a/developer/source/tunnel-client/ls_server_setting.py +++ /dev/null @@ -1,137 +0,0 @@ -#!/usr/bin/env python3 -""" -ls_server_settings.py — print server-side WireGuard [Peer] stanzas from the DB - -Purpose: - Emit configuration that belongs in a *server* wg conf (e.g., /etc/wireguard/wg0.conf). - One [Peer] block per (client, server) row. - -What is printed (per block): - - PublicKey = client's public key (from client.public_key) - - AllowedIPs = client's tunnel address(es) as seen by the server (from client.local_address_cidr) - (Use /32 per client. If multiple /32 per client are later added, enumerate them.) - - PresharedKey = server.preshared_key (only if present) - -Notes: - - Endpoint is NOT set on the server for client peers (client usually dials the server). - - PersistentKeepalive is generally set on the client; server may omit it. 
- -Usage: - ./ls_server_settings.py # all client and their server entries - ./ls_server_settings.py x6 us # only for these client ifaces - ./ls_server_settings.py --server x6 # filter by server.name -""" - -from __future__ import annotations -import sys, sqlite3 -from typing import Iterable, List, Optional, Sequence, Tuple -from pathlib import Path - -# local helper import is optional; only used to locate db path if present -try: - import incommon as ic - DB_PATH = ic.DB_PATH -except Exception: - DB_PATH = Path(__file__).resolve().parent / "db" / "store" - -def die(msg: str, code: int = 1) -> None: - print(f"❌ {msg}", file=sys.stderr); sys.exit(code) - -def open_db(path: Path) -> sqlite3.Connection: - if not path.exists(): die(f"DB not found: {path}") - return sqlite3.connect(path.as_posix()) - -def parse_args(argv: Sequence[str]) -> Tuple[List[str], Optional[str]]: - ifaces: List[str] = [] - server_filter: Optional[str] = None - it = iter(argv) - for a in it: - if a == "--server": - try: server_filter = next(it) - except StopIteration: die("--server requires a value") - else: - ifaces.append(a) - return ifaces, server_filter - -def rows(conn: sqlite3.Connection, q: str, params: Iterable = ()) -> List[tuple]: - cur = conn.execute(q, tuple(params)) - out = cur.fetchall() - cur.close() - return out - -def collect(conn: sqlite3.Connection, ifaces: List[str], server_filter: Optional[str]) -> List[dict]: - where = [] - args: List = [] - if ifaces: - ph = ",".join("?" 
for _ in ifaces) - where.append(f"c.iface IN ({ph})") - args.extend(ifaces) - if server_filter: - where.append("s.name = ?") - args.append(server_filter) - w = ("WHERE " + " AND ".join(where)) if where else "" - q = f""" - SELECT c.id, c.iface, c.public_key, c.local_address_cidr, - s.name, s.preshared_key, s.endpoint_host, s.endpoint_port - FROM Iface c - LEFT JOIN server s ON s.iface_id = c.id - {w} - ORDER BY s.name, c.iface, s.priority ASC, s.id ASC; - """ - R = rows(conn, q, args) - out: List[dict] = [] - for cid, iface, cpub, cidr, sname, psk, host, port in R: - out.append({ - "iface_id": cid, - "iface": iface or "", - "client_pub": cpub or "", - "client_cidr": cidr or "", - "server_name": sname or "(unassigned)", - "server_host": host or "", - "server_port": port or None, - "psk": psk or None, - }) - return out - -def print_header() -> None: - print("# === Server-side WireGuard peer stanzas ===") - print("# Place each [Peer] block into the server's wg conf (e.g., /etc/wireguard/wg0.conf).") - print("# Endpoint is not set for client peers on the server.") - print("# AllowedIPs must be /32 per client address; enumerate multiple /32 if a client uses several.") - print() - -def print_blocks(items: List[dict]) -> None: - if not items: - print("# (no rows matched)"); return - print_header() - # group by server_name for readability - cur_group = None - for r in items: - grp = r["server_name"] - if grp != cur_group: - cur_group = grp - ep = f" ({r['server_host']}:{r['server_port']})" if r["server_host"] and r["server_port"] else "" - print(f"## Server: {grp}{ep}") - # stanza - print("[Peer]") - print(f"# client iface={r['iface']} tunnel={r['client_cidr']}") - print(f"PublicKey = {r['client_pub']}") - # AllowedIPs: prefer the exact CIDR stored for the client (typically /32) - print(f"AllowedIPs = {r['client_cidr']}") - if r["psk"]: - print(f"PresharedKey = {r['psk']}") - print() - # end - -def main(argv: Sequence[str]) -> int: - ifaces, server_filter = 
parse_args(argv) - try: - with open_db(DB_PATH) as conn: - items = collect(conn, ifaces, server_filter) - except sqlite3.Error as e: - die(f"sqlite error: {e}") - print_blocks(items) - return 0 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/ls_servers.sh b/developer/source/tunnel-client/ls_servers.sh deleted file mode 100755 index 5d4f4ef..0000000 --- a/developer/source/tunnel-client/ls_servers.sh +++ /dev/null @@ -1,7 +0,0 @@ - -# ls_server.sh -#!/usr/bin/env bash -set -euo pipefail -DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" -DB="$DIR/db/store" -sqlite3 -noheader -batch "$DB" "SELECT name FROM server ORDER BY name;" diff --git a/developer/source/tunnel-client/ls_user.py b/developer/source/tunnel-client/ls_user.py deleted file mode 100755 index 90c0ef2..0000000 --- a/developer/source/tunnel-client/ls_user.py +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env python3 -""" -ls_users.py — print " " from DB (names only) - -- Validates required tables exist (client, User) -- No side effects; read-only -""" - -from __future__ import annotations -import sys -import sqlite3 -import incommon as ic # DB_PATH, open_db() - -HELP = """Usage: ls_users.py -Prints one line per user binding as: " ". 
-""" - -def tables_ok(conn: sqlite3.Connection) -> bool: - row = conn.execute( - """ - SELECT - (SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='client'), - (SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='User') - """ - ).fetchone() - return row == (1, 1) - -def list_users(conn: sqlite3.Connection) -> None: - cur = conn.execute( - """ - SELECT ub.username, c.iface - FROM User ub - JOIN Iface c ON c.id = ub.iface_id - ORDER BY c.iface, ub.username - """ - ) - for username, iface in cur.fetchall(): - print(f"{username} {iface}") - -def main(argv: list[str]) -> int: - if argv and argv[0] in ("-h", "--help"): - print(HELP.strip()); return 0 - try: - with ic.open_db() as conn: - if not tables_ok(conn): - print("❌ Missing tables (client/User). Initialize the database first.", file=sys.stderr) - return 1 - list_users(conn) - return 0 - except (sqlite3.Error, FileNotFoundError) as e: - print(f"❌ {e}", file=sys.stderr) - return 2 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/manual_reference.org b/developer/source/tunnel-client/manual_reference.org deleted file mode 100644 index 6b0b894..0000000 --- a/developer/source/tunnel-client/manual_reference.org +++ /dev/null @@ -1,90 +0,0 @@ -#+title: WireGuard Client — Reference -#+author: Thomas / Aerenis -#+startup: showall - -* Directory layout (wg/) -- =schema.sql= :: SQLite schema for clients/servers/routes/meta (keys stored in DB). -- =wg_client.db= :: SQLite DB (created by =db_init.sh=). -- =db_init.sh= :: Creates/initializes DB from =schema.sql= (user-space). -- =client_create_keys.sh= :: Creates a fresh client keypair for an =iface= and stores into DB. -- =config_client_StanleyPark.sh= :: Upserts the StanleyPark client row (iface, addr, mtu, dns_mode, autostart, etc.). -- =config_server_x6.sh= :: Upserts the remote server (“x6”) row linked to the client. 
-- =bind_user.sh= :: Binds a Linux username (and resolves UID) to a client interface in DB. -- =ls_clients.sh= :: Lists interface names only (one per line). -- =ls_servers.sh= :: Lists server names (optionally grouped by client). -- =ls_users.sh= :: Lists = = pairs. -- =inspect.sh= :: Shows effective config from DB and current system state for a given iface. -- =IP_rule_add_UID.sh= :: Helper installed to =/usr/local/bin= (adds =ip rule uidrange= entries idempotently). -- =stage_generate.sh= :: Builds staged artifacts from DB: - - =stage/wireguard/.conf= - - =stage/systemd/wg-quick@.d/restart.conf= - - =stage/usr/local/bin/routes_init_.sh= - - copies =IP_rule_add_UID.sh= into stage for install - - Offers to clean stage first; supports =--clean=, =--no-clean=, =--dry-clean= -- =stage_install.sh= :: Copies staged files into: - - =/etc/wireguard/.conf= - - =/etc/systemd/system/wg-quick@.d/restart.conf= - - =/usr/local/bin/routes_init_.sh= - - =/usr/local/bin/IP_rule_add_UID.sh= - - Reloads systemd daemon and prints next steps. -- =stage_clean.sh= :: Empties =./stage= safely (with confirmation). -- =routes_init_x6.sh= :: (Legacy) Example per-iface route script; superseded by staged =routes_init_.sh= -- =deprecated/= :: Old scripts retained for reference. -- =stage/= :: Generated artifacts awaiting installation. -- =scratchpad/= :: (Optional) Temporary workspace for ad-hoc edits before installation. 
- -* Schema (summary) -- =clients= - - =iface= (TEXT UNIQUE): bare interface name (e.g., ‘x6’) - - =rt_table_id= (INTEGER): e.g., 1002 - - =rt_table_name= (TEXT): defaults to iface if NULL (used by route scripts and =ip rule=) - - =bound_user= (TEXT), =bound_uid= (INTEGER): Linux user + UID that should egress via this iface - - =local_address_cidr=, =private_key=, =public_key=, =mtu=, =fwmark= - - =dns_mode= (‘none’ or ‘static’), =dns_servers= (if static) - - =autostart= (0/1) -- =servers= - - Linked by =client_id= → =clients.id= - - =name= (‘x6’), =public_key=, optional =preshared_key= - - =endpoint_host=, =endpoint_port=, =allowed_ips=, =keepalive_s= - - =route_allowed_ips= (0/1): when 0, =Table= is set to =off= in wg conf and routing is handled by our scripts - - =priority= (lower preferred) — first by priority then id is staged -- =routes= - - Linked by =client_id= - - =cidr=, optional =via=, optional =table_name= (else use client rt name), optional =metric= - - =on_up= (1/0), =on_down= (1/0) — generator emits only =on_up= routes in =routes_init_.sh= -- =meta= - - =schema= key describing current schema version/string - -* Generated files (stage/) -- wireguard/.conf :: - - =[Interface]= :: Address, PrivateKey, optional MTU/FwMark/DNS, optional =Table= off - - =[Peer]= :: Server public key, optional PSK, Endpoint, AllowedIPs, optional PersistentKeepalive -- systemd/wg-quick@.d/restart.conf :: - - Restart policy; force fresh link; =ExecStartPost= hooks: - - routes init script - - =IP_rule_add_UID.sh = (if bound) - - logger line -- usr/local/bin/routes_init_.sh :: - - Installs default route to device in =rt_table_name= and a blackhole default guard - - Adds any DB =routes= with =on_up=1 - -* Operational Notes -- =iface= names are bare (not prefixed with =wg_=). Systemd unit is =wg-quick@.service=. -- Unbound rides the tunnel; leave WireGuard DNS unset (=dns_mode=none=) unless you want static DNS in the conf. 
-- Copy-based install preserves an audit trail in =./stage=. Clean explicitly when desired. - -* Security -- The DB contains *private keys*. Restrict permissions: - #+begin_src bash - chmod 600 wg_client.db - #+end_src -- Back up =wg_client.db= securely. - -* Troubleshooting -- If unit fails to start: =journalctl -u wg-quick@ -b= -- Handshake age / peer state: =wg show= -- Routing: =ip rule=, =ip route show table = -- Regenerate & reinstall on mismatch: - #+begin_src bash - ./stage_generate.sh --clean && sudo ./stage_install.sh && sudo systemctl restart wg-quick@ - #+end_src diff --git a/developer/source/tunnel-client/manual_user.org b/developer/source/tunnel-client/manual_user.org deleted file mode 100644 index bef4b37..0000000 --- a/developer/source/tunnel-client/manual_user.org +++ /dev/null @@ -1,104 +0,0 @@ -#+title: WireGuard Client — Admin User Guide -#+author: Thomas / Aerenis -#+startup: showall - -* Overview -Authoritative state lives here: -- ~/executable/setup/Debian12_client/wg/ -- Keys + config live in *SQLite* (./db/store). -- You *stage* generated files in ./stage/, then *install* as root. -- Interface names are *bare* (e.g., =x6=, =US=). Unit: =wg-quick@=; config: =/etc/wireguard/.conf=. -- Unbound is used for DNS; typically =dns_mode= is =none= (no =DNS= line in WG conf). -- Staging dirs are not auto-cleaned; each of =db/=, =stage/=, =scratchpad/= contains a =.gitignore= that ignores everything except itself. 
- -* Typical Workflow (example: x6) -1) Initialize DB -#+begin_src bash -./db_init.sh -#+end_src - -2) Create/Update *client* record for this host (inserts the =x6= row) -#+begin_src bash -./config_client_StanleyPark.sh -#+end_src - -3) Create/rotate *client keys* (writes keys into DB for =x6=) -#+begin_src bash -./client_create_keys.sh x6 -#+end_src - -4) Configure the *remote server* record (x6) -#+begin_src bash -./config_server_x6.sh -#+end_src - -5) Bind Linux user(s) to interface (traffic steering via uid rules) -#+begin_src bash -./user_to_iface.sh Thomas-x6 x6 -# or bulk: -./user_all_to_iface.sh -#+end_src -Verify: -#+begin_src bash -./ls_users.sh -#+end_src - -6) Generate staged files (will offer to clean ./stage first) -#+begin_src bash -./stage_generate.sh -#+end_src -Review contents of =./stage= (WG conf, systemd drop-in, route script). - -7) Install (as root) — copies staged files into the system -#+begin_src bash -sudo ./stage_install.sh -#+end_src - -8) Enable & start the interface -#+begin_src bash -sudo systemctl enable wg-quick@x6 -sudo systemctl start wg-quick@x6 -#+end_src - -9) Inspect / validate -#+begin_src bash -./inspect.sh x6 -ip rule | grep x6 -ip route show table x6 -wg show -#+end_src - -* Key Rotation (client) -- Update keys in DB and redeploy: -#+begin_src bash -./client_create_keys.sh x6 -./stage_generate.sh --clean -sudo ./stage_install.sh -sudo systemctl restart wg-quick@x6 -#+end_src -- Then update the *server’s* peer public key accordingly. - -* Listing helpers -- Interfaces: -#+begin_src bash -./ls_clients.sh # prints: x6, US, ... -#+end_src -- Servers (per client): -#+begin_src bash -./ls_servers.sh # prints server names per client -#+end_src -- User bindings: -#+begin_src bash -./ls_users.sh # prints: -#+end_src - -* Notes -- =./stage= is not auto-cleaned. 
Use: -#+begin_src bash -./stage_clean.sh -#+end_src -- Protect your DB (contains private keys): -#+begin_src bash -chmod 700 db -chmod 600 db/store -#+end_src diff --git a/developer/source/tunnel-client/scratchpad/.gitignore b/developer/source/tunnel-client/scratchpad/.gitignore deleted file mode 100644 index 53642ce..0000000 --- a/developer/source/tunnel-client/scratchpad/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ - -* -!.gitignore - diff --git a/developer/source/tunnel-client/stage/.gitignore b/developer/source/tunnel-client/stage/.gitignore deleted file mode 100644 index 53642ce..0000000 --- a/developer/source/tunnel-client/stage/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ - -* -!.gitignore - diff --git a/developer/source/tunnel-client/stage_IP_apply_script.py b/developer/source/tunnel-client/stage_IP_apply_script.py deleted file mode 100755 index 82e2baa..0000000 --- a/developer/source/tunnel-client/stage_IP_apply_script.py +++ /dev/null @@ -1,508 +0,0 @@ -#!/usr/bin/env python3 -""" -stage_IP_apply_script.py - -Given: - - A SQLite DB (schema you’ve defined), with: - * Iface(id, iface, local_address_cidr, rt_table_name, rt_table_id) - * v_iface_effective(id, rt_table_name_eff, local_address_cidr) - * Route(iface_id, cidr, via, table_name, metric, on_up, on_down) - * "User"(iface_id, username, uid) — table formerly User_Binding - * Meta(key='subu_cidr', value) - - A list of interface names to include (e.g., ["x6","US"]). - -Does: - - Reads DB once and *synthesizes a single* idempotent runtime script - that, for the selected interfaces, on each `wg-quick@IFACE` start: - 1) resets IPv4 addresses on the iface (delete-if-present, then add) - 2) ensures all configured routes exist (using `ip -4 route replace`) - 3) resets policy rules by preference number (delete-by-pref, then add) - with **per-iface prefs** to avoid collisions. 
- - Stages that script under: stage/usr/local/bin/ - - Stages per-iface systemd drop-ins: - stage/etc/systemd/system/wg-quick@IFACE.service.d/-postup-IP-state.conf - which call the script (default prio = 20). - - Stages a merged copy of rt_tables (does not write the live /etc/iproute2/rt_tables). - -Returns: - (script_path, notes[list of strings]) - -Errors: - - Raises RuntimeError if no interfaces provided or there’s nothing to emit. - - Does not modify kernel state — this is staging only. - -Notes: - - Addresses: reset pattern (del → add) for deterministic convergence. - - Routes: `ip -4 route replace` (best-practice) with tolerant logging. - - Rules: reset by `pref` (del-by-pref → add). Prefs are unique per iface: - base = 17000 + Iface.id * 10 - from_pref = base + 0 - uid_pref = base + 1 - - The runtime script accepts optional IFACE args to limit application. -""" - -from __future__ import annotations -from pathlib import Path -from typing import Dict, Iterable, List, Optional, Sequence, Tuple -import argparse -import sqlite3 -import sys - -import incommon as ic # expected: open_db() - -ROOT = Path(__file__).resolve().parent -STAGE_ROOT = ROOT / "stage" - -RT_TABLES_PATH = Path("/etc/iproute2/rt_tables") - - -# ---------- helpers for notes ---------- - -def _stage_note(path: Path, stage_root: Path) -> str: - """Return a short path like 'stage:/usr/local/bin/apply_IP_state.sh'.""" - try: - rel = path.relative_to(stage_root) - return f"stage:/{rel.as_posix()}" - except ValueError: - return str(path) - - -# ---------- rt_tables helpers ---------- - -def _parse_rt_tables(path: Path) -> Tuple[List[str], Dict[str, int], set[int]]: - """ - Returns (lines, name_to_num, used_nums). - Keeps original lines for a non-destructive merge. 
- """ - text = path.read_text() if path.exists() else "" - lines = text.splitlines() - name_to_num: Dict[str, int] = {} - used_nums: set[int] = set() - for ln in lines: - s = ln.strip() - if not s or s.startswith("#"): - continue - parts = s.split() - if len(parts) >= 2 and parts[0].isdigit(): - n = int(parts[0]); nm = parts[1] - if nm not in name_to_num and n not in used_nums: - name_to_num[nm] = n - used_nums.add(n) - return (lines, name_to_num, used_nums) - - -def _first_free_id(used_nums: Iterable[int], low: int, high: int) -> int: - used = set(used_nums) - for n in range(low, high + 1): - if n not in used: - return n - raise RuntimeError(f"no free routing-table IDs in [{low},{high}]") - - -def _stage_rt_tables( - stage_root: Path, - meta: Dict[str, Tuple[int, Optional[int], str, Optional[str]]], - low: int = 20000, - high: int = 29999 -) -> Tuple[Path, List[str]]: - """ - Ensure entries for all effective table names present in `meta`. - Prefer DB rt_table_id when available and not conflicting. - Write merged file to stage/etc/iproute2/rt_tables. 
- Returns (staged_path, notes) - """ - lines, name_to_num, used_nums = _parse_rt_tables(RT_TABLES_PATH) - - # Build eff_name -> preferred_num mapping (first non-None rt_id wins) - eff_to_preferred: Dict[str, Optional[int]] = {} - for _n, (_iid, rtid, eff, _cidr) in meta.items(): - if eff not in eff_to_preferred: - eff_to_preferred[eff] = rtid if rtid is not None else None - - additions: List[Tuple[int, str]] = [] - for eff_name, preferred_num in eff_to_preferred.items(): - if eff_name in name_to_num: - continue # already present - if preferred_num is not None and preferred_num not in used_nums: - num = preferred_num - else: - num = _first_free_id(used_nums, low, high) - name_to_num[eff_name] = num - used_nums.add(num) - additions.append((num, eff_name)) - - out = stage_root / "etc" / "iproute2" / "rt_tables" - out.parent.mkdir(parents=True, exist_ok=True) - - if not additions: - # still write a copy of current file so install step is uniform - out.write_text("\n".join(lines) + ("\n" if lines else "")) - return (out, ["rt_tables: no additions (kept existing map)"]) - - new_lines = list(lines) - for num, name in sorted(additions): - new_lines.append(f"{num} {name}") - - out.write_text("\n".join(new_lines) + "\n") - notes = [f"rt_tables: add {num} {name}" for num, name in sorted(additions)] - return (out, notes) - - -# ---------- DB access ---------- - -def _fetch_meta_subu_cidr(conn: sqlite3.Connection, default="10.0.0.0/24") -> str: - row = conn.execute("SELECT value FROM Meta WHERE key='subu_cidr' LIMIT 1;").fetchone() - return str(row[0]) if row and row[0] else default - - -def _fetch_iface_meta(conn: sqlite3.Connection, iface_names: Sequence[str]) -> Dict[str, Tuple[int, Optional[int], str, Optional[str]]]: - """ - Return {iface_name -> (iface_id, rt_table_id, rt_table_name_eff, local_address_cidr_or_None)}. - """ - if not iface_names: - return {} - ph = ",".join("?" 
for _ in iface_names) - sql = f""" - SELECT i.id, - i.iface, - i.rt_table_id, - v.rt_table_name_eff, - NULLIF(TRIM(v.local_address_cidr),'') AS cidr - FROM Iface i - JOIN v_iface_effective v ON v.id = i.id - WHERE i.iface IN ({ph}) - ORDER BY i.id; - """ - rows = conn.execute(sql, tuple(iface_names)).fetchall() - out: Dict[str, Tuple[int, Optional[int], str, Optional[str]]] = {} - for r in rows: - iface_id = int(r[0]); name = str(r[1]) - rt_id = (int(r[2]) if r[2] is not None else None) - eff = str(r[3]) - cidr = (str(r[4]) if r[4] is not None else None) - out[name] = (iface_id, rt_id, eff, cidr) - return out - - -def _fetch_routes_by_iface_id( - conn: sqlite3.Connection, - iface_ids: Sequence[int], - only_on_up: bool = True -) -> Dict[int, List[Tuple[str, Optional[str], Optional[str], Optional[int]]]]: - """ - Return {iface_id -> [(cidr, via, table_name_or_None, metric_or_None), ...]}. - """ - if not iface_ids: - return {} - ph = ",".join("?" for _ in iface_ids) - sql = f""" - SELECT iface_id, - cidr, - NULLIF(TRIM(via),'') AS via, - NULLIF(TRIM(table_name),'') AS table_name, - metric, - on_up - FROM Route - WHERE iface_id IN ({ph}) - ORDER BY id; - """ - rows = conn.execute(sql, tuple(iface_ids)).fetchall() - out: Dict[int, List[Tuple[str, Optional[str], Optional[str], Optional[int]]]] = {} - for iface_id, cidr, via, tname, metric, on_up in rows: - if only_on_up and int(on_up) != 1: - continue - out.setdefault(int(iface_id), []).append( - (str(cidr), - (str(via) if via is not None else None), - (str(tname) if tname is not None else None), - (int(metric) if metric is not None else None)) - ) - return out - - -def _fetch_uids_by_iface_id(conn: sqlite3.Connection, iface_ids: Sequence[int]) -> Dict[int, List[int]]: - """ - Return {iface_id -> [uid, ...]} using table "User". - """ - if not iface_ids: - return {} - ph = ",".join("?" 
for _ in iface_ids) - sql = f""" - SELECT iface_id, - uid - FROM "User" - WHERE iface_id IN ({ph}) - AND uid IS NOT NULL - AND CAST(uid AS TEXT) != '' - ORDER BY iface_id, uid; - """ - rows = conn.execute(sql, tuple(iface_ids)).fetchall() - out: Dict[int, List[int]] = {} - for iface_id, uid in rows: - out.setdefault(int(iface_id), []).append(int(uid)) - return out - - -# ---------- rendering ---------- - -def _render_composite_script( - plan_ifaces: List[str], - meta: Dict[str, Tuple[int, Optional[int], str, Optional[str]]], - routes_by_id: Dict[int, List[Tuple[str, Optional[str], Optional[str], Optional[int]]]], - uids_by_id: Dict[int, List[int]], - subu_cidr: str -) -> str: - """ - Build a single bash script that ensures addresses → routes → rules. - """ - lines: List[str] = [ - "#!/usr/bin/env bash", - "# apply IP state for selected interfaces (addresses, routes, rules) — idempotent", - "set -euo pipefail", - "", - "ALL_ARGS=(\"$@\")", - "", - "want_iface(){", - " local t=$1", - " if [ ${#ALL_ARGS[@]} -eq 0 ]; then return 0; fi", - " for a in \"${ALL_ARGS[@]}\"; do [ \"$a\" = \"$t\" ] && return 0; done", - " return 1", - "}", - "", - "exists_iface(){ ip -o link show dev \"$1\" >/dev/null 2>&1; }", - "", - "# Reset address: delete the exact CIDR if present, then add it back.", - "reset_addr(){", - " local iface=$1; local cidr=$2", - " ip -4 addr del \"$cidr\" dev \"$iface\" >/dev/null 2>&1 || true", - " if ip -4 addr add \"$cidr\" dev \"$iface\"; then", - " logger \"addr set: $iface $cidr\"", - " else", - " logger \"addr add failed (non-fatal): $iface $cidr\"", - " fi", - "}", - "", - "# Ensure route using replace; log but do not fail the unit if kernel says 'exists'.", - "ensure_route(){", - " local table=$1; local cidr=$2; local dev=$3; local via=${4:-}; local metric=${5:-}", - " if [ -n \"$via\" ] && [ -n \"$metric\" ]; then", - " if ip -4 route replace \"$cidr\" via \"$via\" dev \"$dev\" table \"$table\" metric \"$metric\" 2>/dev/null; then", - " logger 
\"route ensure: table=$table cidr=$cidr dev=$dev via=$via metric=$metric\"", - " else", - " logger \"route ensure (tolerated failure): table=$table cidr=$cidr dev=$dev via=$via metric=$metric\"", - " fi", - " elif [ -n \"$via\" ]; then", - " if ip -4 route replace \"$cidr\" via \"$via\" dev \"$dev\" table \"$table\" 2>/dev/null; then", - " logger \"route ensure: table=$table cidr=$cidr dev=$dev via=$via\"", - " else", - " logger \"route ensure (tolerated failure): table=$table cidr=$cidr dev=$dev via=$via\"", - " fi", - " elif [ -n \"$metric\" ]; then", - " if ip -4 route replace \"$cidr\" dev \"$dev\" table \"$table\" metric \"$metric\" 2>/dev/null; then", - " logger \"route ensure: table=$table cidr=$cidr dev=$dev metric=$metric\"", - " else", - " logger \"route ensure (tolerated failure): table=$table cidr=$cidr dev=$dev metric=$metric\"", - " fi", - " else", - " if ip -4 route replace \"$cidr\" dev \"$dev\" table \"$table\" 2>/dev/null; then", - " logger \"route ensure: table=$table cidr=$cidr dev=$dev\"", - " else", - " logger \"route ensure (tolerated failure): table=$table cidr=$cidr dev=$dev\"", - " fi", - " fi", - "}", - "", - "# Reset a policy rule by numeric preference: delete-by-pref, then add.", - "reset_IP_rule(){", - " # Usage: reset_IP_rule ", - " local pref=$1; shift", - " ip -4 rule del pref \"$pref\" >/dev/null 2>&1 || true", - " if ip -4 rule add \"$@\" pref \"$pref\"; then", - " logger \"rule set: pref=$pref $*\"", - " else", - " logger \"rule add failed (non-fatal): pref=$pref $*\"", - " fi", - "}", - "", - ] - - any_action = False - - # 1) Addresses (reset) - for name in plan_ifaces: - _iid, _rtid, rtname, cidr = meta[name] - if cidr: - lines += [ - f'if want_iface {name}; then', - f' if exists_iface {name}; then reset_addr {name} {cidr}; else logger "skip: iface missing: {name}"; fi', - 'fi' - ] - any_action = True - - # 2) Routes - for name in plan_ifaces: - iid, _rtid, rtname, _cidr = meta[name] - rows = routes_by_id.get(iid, []) - for 
cidr, via, t_override, metric in rows: - table_eff = t_override or rtname - viastr = (via if via is not None else "") - mstr = (str(metric) if metric is not None else "") - lines += [ - f'if want_iface {name}; then', - f' if exists_iface {name}; then ensure_route "{table_eff}" "{cidr}" "{name}" "{viastr}" "{mstr}"; else logger "skip: iface missing: {name}"; fi', - 'fi' - ] - any_action = True - - # 3) Rules (reset by pref: src-cidr, uids, and one global prohibit) - for name in plan_ifaces: - iid, _rtid, rtname, cidr = meta[name] - - # Per-iface preference block (no collisions) - base_pref = 17000 + iid * 10 - from_pref = base_pref + 0 - uid_pref = base_pref + 1 - - if cidr: - lines += [ - f'if want_iface {name}; then', - f' reset_IP_rule {from_pref} from "{cidr}" lookup "{rtname}"', - 'fi' - ] - any_action = True - - uids = uids_by_id.get(iid, []) - for u in uids: - lines += [ - f'if want_iface {name}; then', - f' reset_IP_rule {uid_pref} uidrange "{u}-{u}" lookup "{rtname}"', - 'fi' - ] - any_action = True - - if subu_cidr: - lines += [ - f'reset_IP_rule 18050 from "{subu_cidr}" prohibit' - ] - any_action = True - - if not any_action: - raise RuntimeError("no IP state to emit for requested interfaces") - - lines += [""] - return "\n".join(lines) - - -def _write_dropin_for_iface(stage_root: Path ,iface: str ,script_name: str ,priority: int) -> Path: - # correct systemd path: /etc/systemd/system/wg-quick@IFACE.service.d/ - d = stage_root / "etc" / "systemd" / "system" / f"wg-quick@{iface}.service.d" - d.mkdir(parents=True ,exist_ok=True) - p = d / f"{priority}-postup-IP-state.conf" - content = ( - "[Service]\n" - f"ExecStartPost=+/usr/local/bin/{script_name} {iface}\n" - ) - p.write_text(content) - return p - - -# ---------- business ---------- - -def stage_IP_apply_script( - conn: sqlite3.Connection, - iface_names: Sequence[str], - stage_root: Optional[Path] = None, - script_name: str = "apply_IP_state.sh", - dropin_priority: int = 20, - only_on_up: bool = True, - 
with_dropins: bool = True, - dry_run: bool = False -) -> Tuple[Path, List[str]]: - """ - Plan and stage the unified runtime script, a merged rt_tables, and per-iface drop-ins. - """ - if not iface_names: - raise RuntimeError("no interfaces provided") - - meta = _fetch_iface_meta(conn, iface_names) - if not meta: - raise RuntimeError("none of the requested interfaces exist in DB") - - # preserve caller order but skip unknowns (already handled above) - ifaces_in_order = [n for n in iface_names if n in meta] - iface_ids = [meta[n][0] for n in ifaces_in_order] - - routes_by_id = _fetch_routes_by_iface_id(conn, iface_ids, only_on_up=only_on_up) - uids_by_id = _fetch_uids_by_iface_id(conn, iface_ids) - subu_cidr = _fetch_meta_subu_cidr(conn, default="10.0.0.0/24") - - sr = stage_root or STAGE_ROOT - out = sr / "usr" / "local" / "bin" / script_name - out.parent.mkdir(parents=True, exist_ok=True) - - content = _render_composite_script(ifaces_in_order, meta, routes_by_id, uids_by_id, subu_cidr) - - notes: List[str] = [] - if dry_run: - notes.append(f"dry-run: would write {_stage_note(out, sr)}") - if with_dropins: - for n in ifaces_in_order: - notes.append(f"dry-run: would write {_stage_note(sr / 'etc' / 'systemd' / 'system' / f'wg-quick@{n}.service.d' / f'{dropin_priority}-postup-IP-state.conf', sr)}") - rt_out = sr / "etc" / "iproute2" / "rt_tables" - notes.append(f"dry-run: would write {_stage_note(rt_out, sr)}") - return (out, notes) - - # ensure rt_tables entries for the effective names used by these ifaces - rt_path, rt_notes = _stage_rt_tables(sr, meta) - notes.extend(rt_notes) - notes.append(f"staged: {_stage_note(rt_path, sr)}") - - out.write_text(content) - out.chmod(0o500) - notes.append(f"staged: {_stage_note(out, sr)}") - - if with_dropins: - for n in ifaces_in_order: - dp = _write_dropin_for_iface(sr, n, script_name, dropin_priority) - notes.append(f"staged: {_stage_note(dp, sr)}") - - return (out, notes) - -# Backwards-compatible alias for callers that still 
import the old name. -stage_ip_apply_script = stage_IP_apply_script - - -# ---------- CLI ---------- - -def main(argv=None) -> int: - ap = argparse.ArgumentParser(description="Stage one script that applies IP addresses, routes, and rules for selected ifaces.") - ap.add_argument("ifaces", nargs="+", help="interface names to include") - ap.add_argument("--script-name", default="apply_IP_state.sh") - ap.add_argument("--dropin-priority", type=int, default=20) - ap.add_argument("--all", action="store_true", help="include routes where on_up=0 as well") - ap.add_argument("--no-dropins", action="store_true", help="do not stage systemd drop-ins") - ap.add_argument("--dry-run", action="store_true") - args = ap.parse_args(argv) - - with ic.open_db() as conn: - try: - out, notes = stage_IP_apply_script( - conn, - args.ifaces, - script_name=args.script_name, - dropin_priority=args.dropin_priority, - only_on_up=(not args.all), - with_dropins=(not args.no_dropins), - dry_run=args.dry_run - ) - except Exception as e: - print(f"error: {e}", file=sys.stderr) - return 2 - - if notes: - print("\n".join(notes)) - return 0 - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/developer/source/tunnel-client/stage_StanleyPark.py b/developer/source/tunnel-client/stage_StanleyPark.py deleted file mode 100755 index 77264a3..0000000 --- a/developer/source/tunnel-client/stage_StanleyPark.py +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env python3 -""" -stage_StanleyPark.py - -Minimal config wrapper for client 'StanleyPark'. -Calls the generic stage orchestrator with the chosen ifaces. 
-""" - -from __future__ import annotations -from stage_client import stage_client_artifacts - -CLIENT = "StanleyPark" -IFACES = ["x6","US"] # keep this list minimal & declarative - -if __name__ == "__main__": - ok = stage_client_artifacts( - CLIENT - ,IFACES - ) - raise SystemExit(0 if ok else 2) diff --git a/developer/source/tunnel-client/stage_client.py b/developer/source/tunnel-client/stage_client.py deleted file mode 100755 index 918e6bb..0000000 --- a/developer/source/tunnel-client/stage_client.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python3 -""" -stage_client.py - -Given: - - A SQLite DB via incommon.open_db() - - A client machine name (for WG PrivateKey lookup under ./key/) - - One or more interface names (e.g., x6, US) - -Does: - 1) Stage WireGuard confs for each iface - 2) Stage a unified IP apply script (addresses, routes, rules) + per-iface drop-ins - -Returns: - True on success, False on failure (prints progress) -""" - -from __future__ import annotations -from pathlib import Path -from typing import Callable ,Optional ,Sequence ,Tuple -import argparse -import subprocess -import sys - -import incommon as ic # open_db() - -ROOT = Path(__file__).resolve().parent -STAGE_ROOT = ROOT / "stage" - - -def _msg_wrapped_call(label: str ,fn: Callable[[], Tuple[Path ,Sequence[str]]]) -> bool: - print(f"→ {label}") - try: - path ,notes = fn() - for n in notes: - print(n) - if path: - print(f"✔ {label}: staged: {path}") - else: - print(f"✔ {label}") - return True - except Exception as e: - print(f"❌ {label}: {e}") - return False - - -def _call_cli(argv: Sequence[str]) -> Tuple[Path ,Sequence[str]]: - cp = subprocess.run(list(argv) ,text=True ,capture_output=True) - if cp.returncode != 0: - raise RuntimeError(cp.stderr.strip() or f"exit {cp.returncode}") - notes = [] - staged_path: Optional[Path] = None - for line in (cp.stdout or "").splitlines(): - notes.append(line) - if line.startswith("staged: "): - try: - staged_path = 
Path(line.split("staged:",1)[1].strip()) - except Exception: - pass - return (staged_path or STAGE_ROOT ,notes) - - -def _stage_wg_conf_step(client_name: str ,ifaces: Sequence[str]) -> bool: - def _do(): - try: - from stage_wg_conf import stage_wg_conf # type: ignore - with ic.open_db() as conn: - path ,notes = stage_wg_conf( - conn - ,ifaces - ,client_name - ,stage_root=STAGE_ROOT - ,dry_run=False - ) - return (path ,notes) - except Exception: - return _call_cli([str(ROOT / "stage_wg_conf.py") ,client_name ,*ifaces]) - return _msg_wrapped_call(f"stage_wg_conf ({client_name}; {','.join(ifaces)})" ,_do) - - -def _stage_apply_ip_state_step(ifaces: Sequence[str]) -> bool: - def _do(): - try: - from stage_IP_apply_script import stage_ip_apply_script # type: ignore - with ic.open_db() as conn: - path ,notes = stage_ip_apply_script( - conn - ,ifaces - ,stage_root=STAGE_ROOT - ,script_name="apply_ip_state.sh" - ,only_on_up=True - ,dry_run=False - ) - return (path ,notes) - except Exception: - return _call_cli([str(ROOT / "stage_IP_apply_script.py") ,*ifaces]) - return _msg_wrapped_call(f"stage_IP_apply_script ({','.join(ifaces)})" ,_do) - - -def stage_client_artifacts( - client_name: str - ,iface_names: Sequence[str] - ,stage_root: Optional[Path] = None -) -> bool: - if not iface_names: - raise ValueError("no interfaces provided") - if stage_root: - global STAGE_ROOT - STAGE_ROOT = stage_root - - STAGE_ROOT.mkdir(parents=True ,exist_ok=True) - - ok = True - ok = _stage_wg_conf_step(client_name ,iface_names) and ok - ok = _stage_apply_ip_state_step(iface_names) and ok - return ok - - -def main(argv: Optional[Sequence[str]] = None) -> int: - ap = argparse.ArgumentParser(description="Stage all artifacts for a client.") - ap.add_argument("--client" ,required=True ,help="client machine name (for key lookup)") - ap.add_argument("ifaces" ,nargs="+") - args = ap.parse_args(argv) - - ok = stage_client_artifacts( - args.client - ,args.ifaces - ) - return 0 if ok else 2 - - -if 
__name__ == "__main__": - sys.exit(main()) diff --git a/developer/source/tunnel-client/stage_wg_conf.py b/developer/source/tunnel-client/stage_wg_conf.py deleted file mode 100755 index 28dd4d3..0000000 --- a/developer/source/tunnel-client/stage_wg_conf.py +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/env python3 -""" -stage_wg_conf.py - -Given: - - SQLite DB reachable via incommon.open_db() - - A list of interface names (e.g., x6 ,US) - - client_machine_name used to locate the private key file under ./key/ - -Does: - - For each iface, stage a minimal WireGuard config to stage/etc/wireguard/.conf: - [Interface] - PrivateKey = > - Table = off - ListenPort = (if the column exists and value is not NULL) - # ListenPort = 51820 (commented if value is absent) - [Peer] (one per Server row for that iface) - PublicKey = - PresharedKey = (only if present) - AllowedIPs = - Endpoint = : - PersistentKeepalive = (only if present) - - Omits Address ,PostUp ,SaveConfig (your systemd drop-in + script handle L3 state) - -Returns: - - (list_of_staged_paths ,notes) - -Errors: - - Missing private key file - - Iface not found - - Server rows missing required fields for that iface -""" - -from __future__ import annotations -from pathlib import Path -from typing import Dict ,Iterable ,List ,Optional ,Sequence ,Tuple -import argparse -import sqlite3 -import sys - -import incommon as ic # expected: open_db() - -ROOT = Path(__file__).resolve().parent -STAGE_ROOT = ROOT / "stage" - - -# ---------- helpers ---------- - -def _has_column(conn: sqlite3.Connection ,table: str ,col: str) -> bool: - cur = conn.execute(f"PRAGMA table_info({table});") - cols = [str(r[1]) for r in cur.fetchall()] - return col in cols - - -def _read_private_key(client_machine_name: str ,key_root: Optional[Path] = None) -> str: - kr = key_root or (ROOT / "key") - path = kr / client_machine_name - if not path.exists(): - raise RuntimeError(f"private key file missing: {path}") - text = path.read_text().strip() - if not text: 
- raise RuntimeError(f"private key file empty: {path}") - # WireGuard private keys are base64 (typically 44 chars), but don't over-validate here. - return text - - -# ---------- DB ---------- - -def _fetch_iface_ids_and_ports( - conn: sqlite3.Connection - ,iface_names: Sequence[str] -) -> Dict[str ,Tuple[int ,Optional[int]]]: - """ - Return {iface_name -> (iface_id ,listen_port_or_None)} for requested names. - If the listen_port column does not exist, value is None. - """ - if not iface_names: - return {} - ph = ",".join("?" for _ in iface_names) - has_lp = _has_column(conn ,"Iface" ,"listen_port") - select_lp = ", i.listen_port" if has_lp else ", NULL as listen_port" - sql = f""" - SELECT i.id - , i.iface - {select_lp} - FROM Iface i - WHERE i.iface IN ({ph}) - ORDER BY i.id; - """ - rows = conn.execute(sql ,tuple(iface_names)).fetchall() - out: Dict[str ,Tuple[int ,Optional[int]]] = {} - for iid ,name ,lp in rows: - out[str(name)] = (int(iid) ,(int(lp) if lp is not None else None)) - return out - - -def _fetch_peers_for_iface( - conn: sqlite3.Connection - ,iface_id: int -) -> List[Tuple[str ,Optional[str] ,str ,int ,str ,Optional[int] ,int ,int]]: - """ - Return peers as tuples: - (public_key ,preshared_key ,endpoint_host ,endpoint_port ,allowed_ips ,keepalive_s ,priority ,id) - """ - sql = """ - SELECT public_key - , NULLIF(TRIM(preshared_key),'') as preshared_key - , endpoint_host - , endpoint_port - , allowed_ips - , keepalive_s - , priority - , id - FROM Server - WHERE iface_id = ? 
- ORDER BY priority ASC , id ASC; - """ - rows = conn.execute(sql ,(iface_id,)).fetchall() - out: List[Tuple[str ,Optional[str] ,str ,int ,str ,Optional[int] ,int ,int]] = [] - for pub ,psk ,host ,port ,alips ,ka ,prio ,sid in rows: - out.append((str(pub) ,(str(psk) if psk is not None else None) ,str(host) ,int(port) ,str(alips) ,(int(ka) if ka is not None else None) ,int(prio) ,int(sid))) - return out - - -# ---------- rendering ---------- - -def _render_conf( - iface_name: str - ,private_key: str - ,listen_port: Optional[int] - ,peers: Sequence[Tuple[str ,Optional[str] ,str ,int ,str ,Optional[int] ,int ,int]] -) -> str: - lines: List[str] = [] - lines += [ - "[Interface]" - ,f"PrivateKey = {private_key}" - ,"Table = off" - ] - if listen_port is not None: - lines.append(f"ListenPort = {listen_port}") - else: - lines.append("# ListenPort = 51820") - - lines.append("") # blank before peers - - if not peers: - # You may choose to raise instead; keeping an empty peer set is valid but rarely useful. - lines.append("# (no peers found for this interface)") - - for pub ,psk ,host ,port ,alips ,ka ,_prio ,_sid in peers: - lines += [ - "[Peer]" - ,f"PublicKey = {pub}" - ] - if psk is not None: - lines.append(f"PresharedKey = {psk}") - lines += [ - f"AllowedIPs = {alips}" - ,f"Endpoint = {host}:{port}" - ] - if ka is not None: - lines.append(f"PersistentKeepalive = {ka}") - lines.append("") # blank line between peers - - return "\n".join(lines).rstrip() + "\n" - - -# ---------- business ---------- - -def stage_wg_conf( - conn: sqlite3.Connection - ,iface_names: Sequence[str] - ,client_machine_name: str - ,stage_root: Optional[Path] = None - ,dry_run: bool = False -) -> Tuple[List[Path] ,List[str]]: - """ - Stage /etc/wireguard/.conf for selected ifaces under stage root. 
- """ - if not iface_names: - raise RuntimeError("no interfaces provided") - priv = _read_private_key(client_machine_name) - - meta = _fetch_iface_ids_and_ports(conn ,iface_names) - if not meta: - raise RuntimeError("none of the requested interfaces exist in DB") - - staged: List[Path] = [] - notes: List[str] = [] - sr = stage_root or STAGE_ROOT - outdir = sr / "etc" / "wireguard" - outdir.mkdir(parents=True ,exist_ok=True) - - for name in iface_names: - if name not in meta: - notes.append(f"skip: iface '{name}' missing from DB") - continue - - iface_id ,listen_port = meta[name] - peers = _fetch_peers_for_iface(conn ,iface_id) - - # basic validation of required peer fields - bad = [] - for pub ,_psk ,host ,port ,alips ,_ka ,_prio ,sid in peers: - if not pub or not host or not alips or not (1 <= int(port) <= 65535): - bad.append(sid) - if bad: - raise RuntimeError(f"iface '{name}': invalid peer rows id={bad}") - - conf_text = _render_conf(name ,priv ,listen_port ,peers) - - out = outdir / f"{name}.conf" - if dry_run: - notes.append(f"dry-run: would write {out}") - else: - out.write_text(conf_text) - out.chmod(0o600) - staged.append(out) - notes.append(f"staged: {out}") - - if not staged and not dry_run: - raise RuntimeError("nothing staged (all missing or skipped)") - - return (staged ,notes) - - -# ---------- CLI ---------- - -def main(argv=None) -> int: - ap = argparse.ArgumentParser(description="Stage minimal WireGuard configs with Table=off and no Address.") - ap.add_argument("client_machine_name" ,help="name used to read ./key/") - ap.add_argument("ifaces" ,nargs="+" ,help="interface names to stage") - ap.add_argument("--dry-run" ,action="store_true") - args = ap.parse_args(argv) - - with ic.open_db() as conn: - try: - paths ,notes = stage_wg_conf( - conn - ,args.ifaces - ,args.client_machine_name - ,dry_run=args.dry_run - ) - except Exception as e: - print(f"error: {e}" ,file=sys.stderr) - return 2 - - if notes: - print("\n".join(notes)) - return 0 - - -if 
__name__ == "__main__": - sys.exit(main()) diff --git a/developer/source/tunnel-client/stage_wipe.py b/developer/source/tunnel-client/stage_wipe.py deleted file mode 100755 index 9270e13..0000000 --- a/developer/source/tunnel-client/stage_wipe.py +++ /dev/null @@ -1,71 +0,0 @@ -#!/usr/bin/env python3 -# stage_wipe.py — safely wipe ./stage (keeps hidden files unless --hard) - -from __future__ import annotations -import argparse, shutil, sys -from pathlib import Path - -ROOT = Path(__file__).resolve().parent -STAGE_ROOT = ROOT / "stage" - -def wipe_stage(*, yes: bool=False, dry_run: bool=False, hard: bool=False) -> int: - """Given flags, deletes staged output. Keeps dotfiles unless hard=True.""" - st = STAGE_ROOT - if not st.exists(): - print(f"Nothing to wipe: {st} does not exist.") - return 0 - - # safety: only operate on ./stage relative to this repo folder - if st.resolve() != (ROOT / "stage").resolve(): - print(f"Refusing: unsafe STAGE path: {st}", file=sys.stderr) - return 1 - - # quick stats - try: - count = sum(1 for _ in st.rglob("*")) - except Exception: - count = 0 - - if dry_run: - print(f"DRY RUN — would wipe: {st} (items: {count})") - for p in sorted(st.iterdir()): - print(f" {p.name}") - return 0 - - if not yes: - try: - ans = input(f"Permanently delete contents of {st}? [y/N] ").strip() - except EOFError: - ans = "" - if ans.lower() not in ("y","yes"): - print("Aborted.") - return 0 - - if hard: - shutil.rmtree(st, ignore_errors=True) - print(f"Removed stage dir: {st}") - else: - # remove non-hidden entries only; keep dotfiles (e.g. 
.gitignore) - for p in st.iterdir(): - if p.name.startswith("."): - continue # preserve hidden entries - try: - if p.is_dir(): - shutil.rmtree(p, ignore_errors=True) - else: - p.unlink(missing_ok=True) - except Exception: - pass - print(f"Cleared contents of: {st} (hidden files preserved)") - return 0 - -def main(argv): - ap = argparse.ArgumentParser() - ap.add_argument("--yes", action="store_true", help="do not prompt") - ap.add_argument("--dry-run", action="store_true", help="show what would be removed") - ap.add_argument("--hard", action="store_true", help="remove the stage dir itself") - args = ap.parse_args(argv) - return wipe_stage(yes=args.yes, dry_run=args.dry_run, hard=args.hard) - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/start_iface.py b/developer/source/tunnel-client/start_iface.py deleted file mode 100755 index 0590d38..0000000 --- a/developer/source/tunnel-client/start_iface.py +++ /dev/null @@ -1,230 +0,0 @@ -#!/usr/bin/env python3 -""" -start_iface.py - -Given: - - One or more WireGuard interface names (e.g., x6, US). - - Optional presence of systemd and wg-quick(8). - - Expected config at /etc/wireguard/.conf. - - Optional staged IP state script at /usr/local/bin/apply_ip_state.sh. - -Does: - - For each iface (best-effort, non-fatal steps): - 0) (optional) systemctl daemon-reload - 1) Start via systemd: systemctl start wg-quick@IFACE.service (unless --no-systemd) - else via wg-quick: wg-quick up IFACE (unless --no-wg-quick) - If the iface already exists and --force is given, it will attempt a - best-effort teardown then retry the start once. - 2) If started (or already present), optionally run IP state script: - /usr/local/bin/apply_ip_state.sh IFACE (unless --skip-ip-state) - - Logs each action taken or skipped. - -Returns: - - Exit 0 on success (even if some steps were no-ops); 2 on argument/privilege errors. - - Prints a concise, per-iface action log. 
- -Errors: - - If no ifaces are provided, or if not running as root (unless --force-nonroot). - -Notes: - - This does NOT edit config files or DB; it just brings the iface up cleanly. - - Safe to re-run: “already up/exist” conditions are handled. Use --force to - tear down and recreate if needed. -""" - -from __future__ import annotations -from pathlib import Path -from typing import Iterable, List, Sequence -import argparse -import os -import shutil -import subprocess -import sys - - -# ---------- helpers ---------- - -def _run(cmd: Sequence[str]) -> tuple[int, str, str]: - """Run a command, capture stdout/stderr, return (rc, out, err).""" - try: - cp = subprocess.run(cmd, check=False, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - return (cp.returncode, cp.stdout.strip(), cp.stderr.strip()) - except FileNotFoundError: - return (127, "", f"{cmd[0]}: not found") - -def _exists_iface(name: str) -> bool: - rc, _, _ = _run(["ip", "-o", "link", "show", "dev", name]) - return rc == 0 - -def _systemd_present() -> bool: - return shutil.which("systemctl") is not None - -def _wg_quick_present() -> bool: - return shutil.which("wg-quick") is not None - -def _conf_present(name: str) -> bool: - return Path(f"/etc/wireguard/{name}.conf").is_file() - -def _best_effort_teardown(name: str, logs: List[str]) -> None: - """Try to bring an iface down using systemd/wg-quick, then delete link; non-fatal.""" - unit = f"wg-quick@{name}.service" - if _systemd_present(): - rc, out, err = _run(["systemctl", "stop", unit]) - if rc == 0: - logs.append(f"systemctl: stopped {unit}") - else: - logs.append(f"systemctl: stop {unit} (ignored): {err or out or f'rc={rc}'}") - if _wg_quick_present(): - rc, out, err = _run(["wg-quick", "down", name]) - if rc == 0: - logs.append("wg-quick: down ok") - else: - logs.append(f"wg-quick: down (ignored): {err or out or f'rc={rc}'}") - if _exists_iface(name): - rc, out, err = _run(["ip", "link", "del", "dev", name]) - if rc == 0: - logs.append("ip 
link: deleted leftover device") - else: - logs.append(f"ip link: delete (ignored): {err or out or f'rc={rc}'}") - - -# ---------- business ---------- - -def start_ifaces( - ifaces: Sequence[str], - use_systemd: bool = True, - use_wg_quick: bool = True, - run_ip_state: bool = True, - ip_state_path: str = "/usr/local/bin/apply_ip_state.sh", - daemon_reload: bool = False, - force: bool = False, -) -> List[str]: - """ - Start the given WG ifaces and optionally apply IP state. - Returns a list of log lines. - """ - logs: List[str] = [] - - if not ifaces: - raise RuntimeError("no interfaces provided") - - have_systemd = _systemd_present() - have_wgquick = _wg_quick_present() - have_ipstate = Path(ip_state_path).is_file() - - if use_systemd and daemon_reload and have_systemd: - rc, _out, err = _run(["systemctl", "daemon-reload"]) - if rc == 0: - logs.append("systemctl: daemon-reload") - else: - logs.append(f"systemctl: daemon-reload (ignored): {err or f'rc={rc}'}") - - for name in ifaces: - logs.append(f"== {name} ==") - - # Ensure config exists - if not _conf_present(name): - logs.append(f"config missing: /etc/wireguard/{name}.conf (skip start)") - logs.append(f"status: absent") - logs.append("") - continue - - started = False - already_present = _exists_iface(name) - - # Optionally force recreate if device already around - if already_present and force: - logs.append("iface exists, --force given: tearing down before start") - _best_effort_teardown(name, logs) - already_present = _exists_iface(name) - - # Start via systemd or wg-quick - if use_systemd and have_systemd: - unit = f"wg-quick@{name}.service" - rc, out, err = _run(["systemctl", "start", unit]) - if rc == 0: - logs.append(f"systemctl: started {unit}") - started = True - else: - # If iface already exists, treat as running - if _exists_iface(name): - logs.append(f"systemctl: start {unit} reported error, but iface exists (continuing): {err or out or f'rc={rc}'}") - started = True - else: - logs.append(f"systemctl: 
start {unit} failed: {err or out or f'rc={rc}'}") - elif use_wg_quick and have_wgquick: - if already_present: - logs.append("wg-quick: iface already present") - started = True - else: - rc, out, err = _run(["wg-quick", "up", name]) - if rc == 0: - logs.append("wg-quick: up ok") - started = True - else: - # If iface popped up anyway, continue - if _exists_iface(name): - logs.append(f"wg-quick: up reported error, but iface exists (continuing): {err or out or f'rc={rc}'}") - started = True - else: - logs.append(f"wg-quick: up failed: {err or out or f'rc={rc}'}") - - else: - logs.append("no start method available (systemd/wg-quick disabled or not found)") - - # If requested, apply IP state post-start (useful when not using systemd drop-ins) - if run_ip_state and have_ipstate: - if _exists_iface(name): - rc, out, err = _run([ip_state_path, name]) - if rc == 0: - logs.append(f"ip-state: applied ({ip_state_path} {name})") - else: - logs.append(f"ip-state: apply failed: {err or out or f'rc={rc}'}") - else: - logs.append("ip-state: skipped (iface not present)") - - # Final status - logs.append(f"status: {'up' if _exists_iface(name) else 'down'}") - logs.append("") # spacer - - return logs - - -# ---------- CLI (wrapper only) ---------- - -def _require_root(allow_nonroot: bool) -> None: - if not allow_nonroot and os.geteuid() != 0: - raise RuntimeError("must run as root (use --force-nonroot to override)") - -def main(argv: Sequence[str] | None = None) -> int: - ap = argparse.ArgumentParser(description="Start one or more WireGuard interfaces safely.") - ap.add_argument("ifaces", nargs="+", help="interface names to start (e.g., x6 US)") - ap.add_argument("--no-systemd", action="store_true", help="do not call systemctl start wg-quick@IFACE") - ap.add_argument("--no-wg-quick", action="store_true", help="do not call wg-quick up IFACE") - ap.add_argument("--skip-ip-state", action="store_true", help="do not run apply_ip_state.sh after start") - ap.add_argument("--ip-state-path", 
default="/usr/local/bin/apply_ip_state.sh", help="path to the IP state script") - ap.add_argument("--daemon-reload", action="store_true", help="run systemctl daemon-reload before starts") - ap.add_argument("--force", action="store_true", help="if iface exists, tear down first and retry start") - ap.add_argument("--force-nonroot", action="store_true", help="allow running without root (best-effort)") - args = ap.parse_args(argv) - - try: - _require_root(allow_nonroot=args.force_nonroot) - logs = start_ifaces( - args.ifaces, - use_systemd=(not args.no_systemd), - use_wg_quick=(not args.no_wg_quick), - run_ip_state=(not args.skip_ip_state), - ip_state_path=args.ip_state_path, - daemon_reload=args.daemon_reload, - force=args.force, - ) - for line in logs: - print(line) - return 0 - except Exception as e: - print(f"error: {e}", file=sys.stderr) - return 2 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/stop_clean_iface.py b/developer/source/tunnel-client/stop_clean_iface.py deleted file mode 100755 index 7e6a53a..0000000 --- a/developer/source/tunnel-client/stop_clean_iface.py +++ /dev/null @@ -1,263 +0,0 @@ -#!/usr/bin/env python3 -""" -stop_clean_iface.py - -Stop one or more WireGuard interfaces and clean IP state (rules/routes/addresses). 
-""" - -from __future__ import annotations -from pathlib import Path -from typing import Iterable, List, Optional, Sequence, Tuple, Set -import argparse -import os -import re -import shutil -import subprocess -import sys - -__VERSION__ = "1.1-agg-errors" - -RT_TABLES_FILE = Path("/etc/iproute2/rt_tables") - -# ---------- helpers (shell) ---------- - -def _run(cmd: Sequence[str], dry: bool=False) -> tuple[int, str, str]: - if dry: - return (0, "", "") - try: - cp = subprocess.run(cmd, check=False, text=True, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) - return (cp.returncode, cp.stdout.strip(), cp.stderr.strip()) - except FileNotFoundError: - return (127, "", f"{cmd[0]}: not found") - -def _exists_iface(name: str) -> bool: - rc, _, _ = _run(["ip", "-o", "link", "show", "dev", name]) - return rc == 0 - -def _systemd_present() -> bool: - return shutil.which("systemctl") is not None - -def _wg_quick_present() -> bool: - return shutil.which("wg-quick") is not None - -# ---------- helpers (routing tables & rules) ---------- - -def _rt_table_num_for_name(name: str) -> Optional[int]: - if not RT_TABLES_FILE.exists(): - return None - try: - text = RT_TABLES_FILE.read_text() - except Exception: - return None - for line in text.splitlines(): - s = line.strip() - if not s or s.startswith("#"): - continue - parts = s.split() - if len(parts) >= 2 and parts[0].isdigit(): - num = int(parts[0]); nm = parts[1] - if nm == name: - return num - return None - -_RULE_RE = re.compile(r"""^\s*(\d+):\s*(.+?)\s*$""") - -def _current_rule_lines() -> List[Tuple[int,str]]: - rc, out, _ = _run(["ip", "-4", "rule", "show"]) - if rc != 0 or not out: - return [] - rows: List[Tuple[int,str]] = [] - for ln in out.splitlines(): - m = _RULE_RE.match(ln) - if not m: - continue - pref = int(m.group(1)) - rest = m.group(2) - rows.append((pref, rest)) - return rows - -def _prefs_matching_lookups(lookups: Sequence[str]) -> Set[int]: - toks = [t for t in lookups if t] - prefs: Set[int] = set() - if not 
toks: - return prefs - for pref, rest in _current_rule_lines(): - for t in toks: - if re.search(rf"\blookup\s+{re.escape(t)}\b", rest): - prefs.add(pref) - break - return prefs - -def _rule_del_by_pref(pref: int, logs: List[str], dry: bool) -> None: - rc, _out, err = _run(["ip", "-4", "rule", "del", "pref", str(pref)], dry=dry) - if rc == 0: - logs.append(f"ip rule: deleted pref {pref}") - else: - logs.append(f"ip rule: delete pref {pref} (ignored): {err or f'rc={rc}'}") - -def _flush_routes_for_table(table: str, logs: List[str], dry: bool) -> None: - rc, _out, err = _run(["ip", "-4", "route", "flush", "table", table], dry=dry) - if rc == 0: - logs.append(f"ip route: flushed table {table}") - else: - logs.append(f"ip route: flush table {table} (ignored): {err or f'rc={rc}'}") - -def _addr_del_all_v4_on_iface(iface: str, logs: List[str], dry: bool) -> None: - rc, out, err = _run(["ip", "-4", "-o", "addr", "show", "dev", iface], dry=dry) - if rc != 0: - logs.append(f"ip addr: list on {iface} (ignored): {err or f'rc={rc}'}") - return - cidrs: List[str] = [] - for ln in out.splitlines(): - parts = ln.split() - if len(parts) >= 4: - cidrs.append(parts[3]) - if not cidrs: - logs.append("ip addr: none to remove") - return - for cidr in cidrs: - rc2, _o2, e2 = _run(["ip", "-4", "addr", "del", cidr, "dev", iface], dry=dry) - if rc2 == 0: - logs.append(f"ip addr: deleted {cidr}") - else: - logs.append(f"ip addr: delete {cidr} (ignored): {e2 or f'rc={rc2}'}") - -# ---------- business ---------- - -def _clean_iface_ip_state(name: str, logs: List[str], *, dry: bool=False, aggressive: bool=False) -> None: - tokens: List[str] = [name] - num = _rt_table_num_for_name(name) - if num is not None: - tokens.append(str(num)) - - # Delete rules matching either numeric or named lookup tokens; loop to catch chains. 
- deleted_any = True - safety = 0 - while deleted_any and safety < 10: - safety += 1 - prefs = sorted(_prefs_matching_lookups(tokens)) - if not prefs: - deleted_any = False - break - for p in prefs: - _rule_del_by_pref(p, logs, dry) - if aggressive: - for p in range(17000, 17060): - _rule_del_by_pref(p, logs, dry) - - # Flush routes in the table by name and numeric (if known) - _flush_routes_for_table(name, logs, dry) - if num is not None: - _flush_routes_for_table(str(num), logs, dry) - - # Remove all IPv4 addresses on the iface - _addr_del_all_v4_on_iface(name, logs, dry) - -def stop_clean_ifaces( - ifaces: Sequence[str], - use_systemd: bool = True, - use_wg_quick: bool = True, - do_clean: bool = True, - aggressive: bool = False, - dry_run: bool = False, -) -> List[str]: - logs: List[str] = [] - if not ifaces: - raise RuntimeError("no interfaces provided") - - have_systemd = _systemd_present() - have_wgquick = _wg_quick_present() - - for name in ifaces: - logs.append(f"== {name} ==") - - if use_systemd and have_systemd: - unit = f"wg-quick@{name}.service" - rc, out, err = _run(["systemctl", "stop", unit], dry=dry_run) - if rc == 0: - logs.append(f"systemctl: stopped {unit}") - else: - msg = err or out or f"rc={rc}" - logs.append(f"systemctl: stop {unit} (ignored): {msg}") - elif use_systemd and not have_systemd: - logs.append("systemctl: not found; skipped") - - if use_wg_quick and have_wgquick: - rc, out, err = _run(["wg-quick", "down", name], dry=dry_run) - if rc == 0: - logs.append("wg-quick: down ok") - else: - msg = err or out or f"rc={rc}" - logs.append(f"wg-quick: down (ignored): {msg}") - elif use_wg_quick and not have_wgquick: - logs.append("wg-quick: not found; skipped") - - if do_clean: - _clean_iface_ip_state(name, logs, dry=dry_run, aggressive=aggressive) - else: - logs.append("clean: skipped (--no-clean)") - - if _exists_iface(name): - rc, out, err = _run(["ip", "link", "del", "dev", name], dry=dry_run) - if rc == 0: - logs.append("ip link: deleted 
device") - else: - msg = err or out or f"rc={rc}" - logs.append(f"ip link: delete (ignored): {msg}") - else: - logs.append("ip link: device not present; nothing to delete") - - final_present = _exists_iface(name) - logs.append(f"status: {'gone' if not final_present else 'still present'}") - logs.append("") - - return logs - -# ---------- CLI (wrapper with aggregated errors) ---------- - -def main(argv: Sequence[str] | None = None) -> int: - ap = argparse.ArgumentParser( - description="Stop one or more WireGuard interfaces and clean IP state.", - add_help=True) - ap.add_argument("ifaces", nargs="*", help="interface names to stop (e.g., x6 US)") - ap.add_argument("--no-systemd", action="store_true", help="do not call systemctl stop wg-quick@IFACE") - ap.add_argument("--no-wg-quick", action="store_true", help="do not call wg-quick down IFACE") - ap.add_argument("--no-clean", action="store_true", help="skip IP cleanup (rules/routes/addresses)") - ap.add_argument("--aggressive", action="store_true", help="also purge common rule pref window (17000-17059)") - ap.add_argument("--dry-run", action="store_true", help="print what would be done without changing state") - ap.add_argument("--force-nonroot", action="store_true", help="allow running without root (best-effort)") - - args = ap.parse_args(argv) - - # Aggregate invocation errors - errors: List[str] = [] - if os.geteuid() != 0 and not args.force_nonroot: - errors.append("must run as root (use --force-nonroot to override)") - if not args.ifaces: - errors.append("no interfaces provided") - - if errors: - sys.stderr.write(ap.format_usage()) - prog = Path(sys.argv[0]).name or "stop_clean_iface.py" - sys.stderr.write(f"{prog}: error: " + "; ".join(errors) + "\n") - return 2 - - try: - logs = stop_clean_ifaces( - args.ifaces, - use_systemd=(not args.no_systemd), - use_wg_quick=(not args.no_wg_quick), - do_clean=(not args.no_clean), - aggressive=args.aggressive, - dry_run=args.dry_run, - ) - for line in logs: - print(line) - 
return 0 - except Exception as e: - print(f"error: {e}", file=sys.stderr) - return 2 - -if __name__ == "__main__": - sys.exit(main(sys.argv[1:])) diff --git a/developer/source/tunnel-client/todo.org b/developer/source/tunnel-client/todo.org deleted file mode 100644 index 46a1a41..0000000 --- a/developer/source/tunnel-client/todo.org +++ /dev/null @@ -1,73 +0,0 @@ -n#+TITLE: subu / WireGuard — TODO -#+AUTHOR: Thomas & Nerith (session) -#+LANGUAGE: en -#+OPTIONS: toc:2 num:t -#+TODO: TODO(t) NEXT(n) WAITING(w) BLOCKED(b) | DONE(d) CANCELED(c) - -- Your current DB schema (the one you pasted earlier) does not include a listen-port field on Iface. So if you want ListenPort = … to be driven from the DB, add a column like Iface.listen_port INTEGER CHECK(listen_port BETWEEN 1 AND 65535). - -- have the stage commands echo relative pathnames instead of absolute as they do now. - -- the one private key pair per client (instead of per interface), turns out to be a bad idea, as we can't manage tunnels individually, say, by revoking keys. We need to move to a key pair per interface instead. - -- db_wipe needs to delete the key directory contents also - -------------------------------- - -- Known gaps / open decisions - - Systemd drop-in to call staged scripts on ~wg-quick@IFACE~ up (IPv4 addrs + policy rules). - - Staged policy-rules script (source-based + uidrange rules) to replace the old global ~IP_rule_add.sh~ usage. - - Installer flow & atomic writes (copy staged files, set owner/perms; safe update of ~/etc/iproute2/rt_tables~). - - Pool size policy: default /16 with /32 hosts is implemented; decision pending on /8 vs /16. - - Style guardrails (RT commas / two-space indent) are manual; optional linter TBD. - -* NEXT wiring (high-level order) -1) Stage: /etc/iproute2/rt_tables (merge) for selected ifaces. -2) Stage: /usr/local/bin/set_iface_ipv4_addrs.sh for same ifaces. -3) Stage: /usr/local/bin/set_policy_rules_for_ifaces.sh (new; replaces old global add tool). 
-4) Stage: systemd drop-ins for ~wg-quick@IFACE.service.d/10-postup.conf~ to call (2) then (3). -5) Install: copy staged files → system, set perms/owner; ~systemctl daemon-reload~. -6) Bring-up: ~wg-quick up IFACE~; verify routes/rules; smoke tests. - -* TODO Add “missing-iface” guard to staged IPv4 script -- When iface doesn’t exist yet, log and continue (no non-zero exit). - -* TODO Stage policy rules script (idempotent) -- For each iface: - - Source-based rule: =from lookup =. - - UID rules: =uidrange U-U lookup = for each bound UID. -- Only for ifaces passed on the CLI; DB-driven; no kernel writes here. -- Emit with checks (skip if grep finds the exact rule). - -* TODO Systemd drop-in generator -- Emit to: ~stage/etc/systemd/wg-quick@IFACE.service.d/10-postup.conf~. -- Include: - - =ExecStartPre=-/usr/sbin/ip link delete IFACE= (clean stale link). - - =ExecStartPost=+/usr/local/bin/set_iface_ipv4_addrs.sh=. - - =ExecStartPost=+/usr/local/bin/set_policy_rules_for_ifaces.sh=. - - =ExecStartPost=+/usr/bin/logger 'wg-quick@IFACE up: addrs+rules applied'=. - -* TODO Installer flow -- Copy staged files with perms (0500 for scripts; 0644 for rt_tables; 0755 for dirs). -- Atomic update for ~/etc/iproute2/rt_tables~ (write temp + move); keep timestamped backup. -- ~systemctl daemon-reload~ after installing drop-ins. - -* WAITING Decide “no-op staging” policy for rt_tables -- Option A: Always stage a copy (deterministic deployment). -- Option B: Stage only when there are new entries (quieter diffs). - -* TODO Tests -- Unit-ish: parse/plan functions for both staging scripts (dry-run cases, collisions, skip-missing cases). -- Integration: - - Create temp WG iface: ~ip link add dev t0 type wireguard~ (and delete after). - - Run staged scripts; verify ~ip -4 addr show dev t0~, ~ip rule show~, ~ip route show table ~. - - Bring up real ~wg-quick up x6~; repeat verifications. - -* TODO Docs -- Append “operational runbook” to the org manual (bring-up, verify, recover, teardown). 
- -* DONE What’s already proven by commands (from log) -- all db_init is running, orchestrated by db_init_StanleyPark -- =stage_rt_tables_merge.py --from-db x6 US= created staged rt_tables with merges. -- =stage_iface_ipv4_script.py x6 US= staged ~set_iface_ipv4_addrs.sh~. - diff --git a/developer/source/tunnel-client/wg_keys_incommon.py b/developer/source/tunnel-client/wg_keys_incommon.py deleted file mode 100644 index 1578899..0000000 --- a/developer/source/tunnel-client/wg_keys_incommon.py +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env python3 -# wg_keys_incommon.py — predicates + actuators for WG keypairs - -from __future__ import annotations -import shutil, subprocess, sqlite3 - -def wellformed_client_keypair(conn: sqlite3.Connection, iface: str) -> bool: - """Predicate: True iff client IFACE has a syntactically valid WG keypair.""" - row = conn.execute( - "SELECT private_key, public_key FROM Iface WHERE iface=? LIMIT 1;", (iface,) - ).fetchone() - if not row: return False - priv, pub = (row[0] or ""), (row[1] or "") - return (43 <= len(priv.strip()) <= 45) and (43 <= len(pub.strip()) <= 45) - -def generate_client_keypair_if_missing(conn: sqlite3.Connection, iface: str) -> bool: - """ - Actuator: if IFACE lacks a well-formed keypair, generate one with `wg`, - store it in the DB, and return True. Return False if nothing changed. 
- """ - if wellformed_client_keypair(conn, iface): - return False - if not shutil.which("wg"): - raise RuntimeError("wg not found; cannot generate keys") - gen = subprocess.run(["wg","genkey"], capture_output=True, text=True, check=True) - priv = gen.stdout.strip() - pubp = subprocess.run(["wg","pubkey"], input=priv.encode(), capture_output=True, check=True) - pub = pubp.stdout.decode().strip() - conn.execute( - "UPDATE Iface SET private_key=?, public_key=?, " - "updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') WHERE iface=?", - (priv, pub, iface), - ) - return True diff --git a/developer/source/tunnel-server/set_client_key.sh b/developer/source/tunnel-server/set_client_key.sh deleted file mode 100755 index 9e28f6b..0000000 --- a/developer/source/tunnel-server/set_client_key.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env bash -# set_client_key.sh — replace/set a client's public key on the server -# Usage: set_client_key.sh [allowed-ips=10.8.0.2/32] [iface=wg0] -# Example: set_client_key.sh 88gTdpESSwAc0iip6tVotc8/taZErY18n3lzrgAd+XY= 10.8.0.2/32 wg0 - -set -euo pipefail - -PUB="${1:-}" -ALLOWED="${2:-10.8.0.2/32}" -IFACE="${3:-wg0}" -CFG="/etc/wireguard/${IFACE}.conf" - -[[ $EUID -eq 0 ]] || { echo "❌ Must be run as root."; exit 1; } -command -v wg >/dev/null || { echo "❌ wg not found."; exit 1; } -command -v wg-quick >/dev/null || { echo "❌ wg-quick not found."; exit 1; } - -[[ -n "$PUB" ]] || { echo "Usage: $0 [allowed-ips] [iface]"; exit 2; } -# quick sanity on key length -kl=${#PUB}; [[ $kl -ge 43 && $kl -le 45 ]] || { echo "❌ Public key length looks wrong."; exit 2; } -[[ -f "$CFG" ]] || { echo "❌ Config not found: $CFG"; exit 1; } - -# Require the interface to be up (simplest, reliable path) -if ! wg show "$IFACE" >/dev/null 2>&1; then - echo "❌ Interface $IFACE is not up. Start it first: wg-quick up $IFACE" - echo " Or stop it and edit $CFG manually (replace the peer that has AllowedIPs = $ALLOWED)." 
- exit 1 -fi - -# Remove any existing peer that currently owns the same AllowedIPs (typical /32 per client) -while read -r oldkey oldips; do - if [[ "$oldips" == "$ALLOWED" ]]; then - echo "→ Removing existing peer $oldkey with AllowedIPs $ALLOWED" - wg set "$IFACE" peer "$oldkey" remove || true - fi -done < <(wg show "$IFACE" allowed-ips | awk '{print $1, $2}') - -# Add the new peer -wg set "$IFACE" peer "$PUB" allowed-ips "$ALLOWED" - -# Persist runtime state back to the config (works great even if SaveConfig=true) -wg-quick save "$IFACE" - -echo "✔ Updated $IFACE: set peer $PUB with AllowedIPs $ALLOWED and saved to $CFG" -wg show "$IFACE" diff --git a/developer/source/tunnel-server/setup.sh b/developer/source/tunnel-server/setup.sh deleted file mode 100755 index eee81ce..0000000 --- a/developer/source/tunnel-server/setup.sh +++ /dev/null @@ -1,113 +0,0 @@ -#!/usr/bin/env bash -# 2025-09-05 -# Debian 12 Setup: WireGuard egress server + one client (safe/idempotent) -set -euo pipefail -umask 0077 -[[ $EUID -eq 0 ]] || { echo "❌ run as root"; exit 1; } -run(){ echo "+ $*"; eval "$@"; } - -WG_IF="wg0" -WG_PORT="${WG_PORT:-51820}" -WG_DIR="/etc/wireguard" -CLIENT_DIR="/root/wireguard" -CLIENT_NAME="${CLIENT_NAME:-client1}" - -SERVER_NET_V4="${SERVER_NET_V4:-10.8.0.0/24}" -SERVER_ADDR_V4="${SERVER_ADDR_V4:-10.8.0.1/24}" -CLIENT_ADDR_V4="${CLIENT_ADDR_V4:-10.8.0.2/32}" - -# --- Packages --- -need_pkgs=() -for p in wireguard qrencode iproute2; do command -v ${p%% *} >/dev/null 2>&1 || need_pkgs+=("$p"); done -if ((${#need_pkgs[@]})); then - DEBIAN_FRONTEND=noninteractive run apt-get update - run apt-get install -y "${need_pkgs[@]}" -fi - -install -d -m 0700 "$WG_DIR" "$CLIENT_DIR" - -# --- Detect WAN IF + public IPv4 --- -WAN_IF=$(ip -o -4 route show to default | awk '{print $5; exit}') -[[ -n "${WAN_IF:-}" ]] || { echo "❌ Could not detect WAN interface"; exit 1; } -SERVER_IPv4=$(ip -o -4 addr show dev "$WAN_IF" | awk '{print $4}' | cut -d/ -f1 | head -n1) -[[ -n 
"${SERVER_IPv4:-}" ]] || SERVER_IPv4="" - -# --- Keys (server) --- -if [[ ! -f "$WG_DIR/server.key" ]]; then - (umask 077; wg genkey | tee "$WG_DIR/server.key" | wg pubkey > "$WG_DIR/server.pub") - chmod 600 "$WG_DIR/server.key" -fi -SERVER_PRIV=$(cat "$WG_DIR/server.key") -SERVER_PUB=$(cat "$WG_DIR/server.pub") - -# --- Keys (client) --- -if [[ ! -f "$CLIENT_DIR/${CLIENT_NAME}.key" ]]; then - (umask 077; wg genkey | tee "$CLIENT_DIR/${CLIENT_NAME}.key" | wg pubkey > "$CLIENT_DIR/${CLIENT_NAME}.pub") - chmod 600 "$CLIENT_DIR/${CLIENT_NAME}.key" -fi -CLIENT_PRIV=$(cat "$CLIENT_DIR/${CLIENT_NAME}.key") -CLIENT_PUB=$(cat "$CLIENT_DIR/${CLIENT_NAME}.pub") - -# --- IPv4 forwarding --- -install -d -m 0755 /etc/sysctl.d -cat > /etc/sysctl.d/99-wireguard-forwarding.conf <<'EOF' -net.ipv4.ip_forward=1 -# net.ipv6.conf.all.forwarding=1 -EOF -sysctl --system >/dev/null - -# --- Write server config (backup if existing) --- -CFG="$WG_DIR/${WG_IF}.conf" -if [[ -f "$CFG" ]]; then - cp -a "$CFG" "$CFG.bak.$(date -u +%Y%m%dT%H%M%SZ)" -fi -cat > "$CFG" < "$CLIENT_CFG" </dev/null 2>&1 && ufw status | grep -q "Status: active"; then - ufw status | grep -q "^${WG_PORT}/udp" || ufw allow "${WG_PORT}/udp" || true -fi - -# --- Enable interface --- -run systemctl enable --now wg-quick@"$WG_IF" - -# --- Status + QR --- -echo -wg show "$WG_IF" || true -echo -echo "Client file: $CLIENT_CFG" -command -v qrencode >/dev/null 2>&1 && { echo "QR (WireGuard mobile import):"; qrencode -t ansiutf8 < "$CLIENT_CFG"; } -echo -echo "If Endpoint autodetection is wrong, edit it to your public IP or DNS." 
def system_uid_or_none(username: str) -> Optional[int]:
    """Return the local system UID for *username*, or None when no such user exists.

    Looks the name up in the local password database (pwd / /etc/passwd);
    a missing user is reported as None rather than raising.
    """
    try:
        return pwd.getpwnam(username).pw_uid
    except KeyError:
        return None

def bind_user_to_iface(conn: sqlite3.Connection, iface: str, username: str) -> str:
    """Bind ONE linux user to ONE interface row in the DB (no schema writes).

    Given (iface, username):
      - Look up Iface.id by interface name (table: Iface)
      - Upsert into User(iface_id, username, uid)
      - Refresh uid from the local passwd database (NULL when the user is absent)

    Returns a concise status string.
    Raises RuntimeError when *iface* has no row in Iface.
    """
    row = conn.execute("SELECT id FROM Iface WHERE iface=? LIMIT 1;", (iface,)).fetchone()
    if not row:
        # Fixed message: the lookup table is Iface, not the old 'client' table.
        raise RuntimeError(f"Interface '{iface}' not found in Iface")

    iface_id = int(row[0])
    uid_val = system_uid_or_none(username)

    # Upsert binding; (iface_id, username) is the conflict key.
    conn.execute("""
      INSERT INTO User (iface_id, username, uid, created_at, updated_at)
      VALUES (?, ?, ?, strftime('%Y-%m-%dT%H:%M:%SZ','now'), strftime('%Y-%m-%dT%H:%M:%SZ','now'))
      ON CONFLICT(iface_id, username) DO UPDATE SET
        uid        = excluded.uid,
        updated_at = strftime('%Y-%m-%dT%H:%M:%SZ','now');
    """, (iface_id, username, uid_val))

    if uid_val is None:
        return f"bound {username} → {iface} (uid=NULL; user not present on this system)"
    return f"bound {username} → {iface} (uid={uid_val})"

def main(argv: list[str]) -> int:
    """CLI entry point: bind <username> to <iface>; returns a process exit code."""
    if len(argv) != 2:
        prog = Path(sys.argv[0]).name
        # Restored the argument placeholders (missing from the original usage text).
        print(f"Usage: {prog} <username> <iface>", file=sys.stderr)
        return 2

    username, iface = argv
    try:
        with ic.open_db() as conn:
            msg = bind_user_to_iface(conn, iface, username)
            conn.commit()
    except FileNotFoundError as e:
        print(f"❌ {e}", file=sys.stderr); return 1
    except sqlite3.Error as e:
        print(f"❌ sqlite error: {e}", file=sys.stderr); return 1
    except RuntimeError as e:
        print(f"❌ {e}", file=sys.stderr); return 1

    print(f"✔ {msg}")
    return 0
+ C = ic.rows(conn, """ + SELECT id, iface, local_address_cidr, rt_table_name_eff + FROM v_client_effective + ORDER BY iface; + """) + if not C: + print("WARN: no client present"); return 1 + + # 2) CIDR sanity + for cid, iface, cidr, rtname in C: + try: + ipaddress.IPv4Interface(cidr) + except Exception as e: + print(f"ERR: client {iface} has invalid CIDR {cidr}: {e}") + errs += 1 + + # 3) server exist and map to client + S = ic.rows(conn, """ + SELECT s.id, c.iface, s.name, s.public_key, s.endpoint_host, s.endpoint_port, s.allowed_ips + FROM server s + JOIN Iface c ON c.id = s.iface_id + ORDER BY c.iface, s.name; + """) + if not S: + print("WARN: no server present for any client") + + # 4) user bindings exist? (not required, but useful) + UB = ic.rows(conn, """ + SELECT c.iface, ub.username, ub.uid + FROM User ub + JOIN Iface c ON c.id = ub.iface_id + ORDER BY c.iface, ub.username; + """) + if not UB: + print("WARN: no User present") + + # 5) duplicate tunnel IPs across client (/32 equality) + tunnel_hosts = {} + for _, iface, cidr, _ in C: + try: + host = str(ipaddress.IPv4Interface(cidr).ip) + if host in tunnel_hosts and tunnel_hosts[host] != iface: + print(f"ERR: duplicate tunnel host {host} on {tunnel_hosts[host]} and {iface}") + errs += 1 + else: + tunnel_hosts[host] = iface + except Exception: + pass + + # 6) Server AllowedIPs hygiene: warn when 0.0.0.0/0 appears in server table + for sid, iface, sname, pub, host, port, allow in S: + if allow.strip() == "0.0.0.0/0": + # client-side full-tunnel is fine; server-side peer should use /32 entries + print(f"NOTE: server(name={sname}, client={iface}) has AllowedIPs=0.0.0.0/0 (client-side full-tunnel). Ensure server peer uses /32(s).") + + # 7) meta.subu_cidr present? 
def msg_wrapped_call(title: str, fn=None, *args, **kwargs):
    """Print '→ title', call `fn(*args, **kwargs)` when given, then print '✔ title'.

    The completion line carries ': <result>' unless the result is None or ''.
    Returns the callable's return value (None when no callable was supplied).
    """
    print(f"→ {title}", flush=True)
    result = fn(*args, **kwargs) if fn else None
    tail = "" if result in (None, "") else f": {result}"
    print(f"✔ {title}{tail}", flush=True)
    return result
# Helpers to seed/update a row in Iface.
#
# Normally don't set addr_cidr: the address-assignment pass will allocate a
# free address, or keep one that is already recorded.

def upsert_client(conn: sqlite3.Connection,
                  *,
                  iface: str,
                  addr_cidr: Optional[str] = None,
                  rt_table_name: Optional[str] = None,
                  rt_table_id: Optional[int] = None,
                  mtu: Optional[int] = None,
                  fwmark: Optional[int] = None,
                  dns_mode: Optional[str] = None,      # 'none' or 'static'
                  dns_servers: Optional[str] = None,
                  autostart: Optional[int] = None,     # 0 or 1
                  bound_user: Optional[str] = None,
                  bound_uid: Optional[int] = None
                  ) -> str:
    """Insert or update the Iface row for *iface*.

    Only keyword arguments that are not None are written; None means "leave
    the stored value alone".  (Bug fix: previously `local_address_cidr` was
    always included in the desired state, so calling without addr_cidr
    erased an already-assigned address — contradicting the module comment.)

    rt_table_name defaults to the interface name when the row has none.
    Returns 'seeded: …', 'updated: …' or 'ok: … unchanged'.
    """
    row = conn.execute(
        """SELECT id, iface, rt_table_id, rt_table_name, local_address_cidr,
                  mtu, fwmark, dns_mode, dns_servers, autostart,
                  bound_user, bound_uid
           FROM Iface WHERE iface=? LIMIT 1;""",
        (iface,)
    ).fetchone()

    defname = rt_table_name if rt_table_name is not None else iface

    # Collect only the explicitly-requested values (fix: addr_cidr no longer
    # forced into the set when None).
    desired: Dict[str, Any] = {}
    if addr_cidr is not None:      desired["local_address_cidr"] = addr_cidr
    if rt_table_id is not None:    desired["rt_table_id"] = rt_table_id
    if rt_table_name is not None:  desired["rt_table_name"] = rt_table_name
    if mtu is not None:            desired["mtu"] = mtu
    if fwmark is not None:         desired["fwmark"] = fwmark
    if dns_mode is not None:       desired["dns_mode"] = dns_mode
    if dns_servers is not None:    desired["dns_servers"] = dns_servers
    if autostart is not None:      desired["autostart"] = autostart
    if bound_user is not None:     desired["bound_user"] = bound_user
    if bound_uid is not None:      desired["bound_uid"] = bound_uid

    if row is None:
        fields = ["iface", "local_address_cidr", "rt_table_name"]
        vals = [iface, addr_cidr, defname]
        for k in ("rt_table_id", "mtu", "fwmark", "dns_mode", "dns_servers",
                  "autostart", "bound_user", "bound_uid"):
            if k in desired:
                fields.append(k); vals.append(desired[k])
        q = f"INSERT INTO Iface ({','.join(fields)}) VALUES ({','.join('?' for _ in vals)});"
        cur = conn.execute(q, vals); conn.commit()
        return f"seeded: client(iface={iface}) id={cur.lastrowid} addr={addr_cidr} rt={defname}"

    (cid, _, rt_id, rt_name, cur_addr, cur_mtu, cur_fwm, cur_dns_mode,
     cur_dns_srv, cur_auto, cur_buser, cur_buid) = row
    current = {
        "local_address_cidr": cur_addr, "rt_table_id": rt_id, "rt_table_name": rt_name,
        "mtu": cur_mtu, "fwmark": cur_fwm, "dns_mode": cur_dns_mode, "dns_servers": cur_dns_srv,
        "autostart": cur_auto, "bound_user": cur_buser, "bound_uid": cur_buid,
    }
    changes: Dict[str, Any] = {k: v for k, v in desired.items() if current.get(k) != v}
    if rt_name is None and "rt_table_name" not in changes:
        changes["rt_table_name"] = defname   # backfill a default table name
    if not changes:
        return f"ok: client(iface={iface}) unchanged id={cid} addr={cur_addr} rt={rt_name or defname}"
    sets = ", ".join(f"{k}=?" for k in changes)
    vals = list(changes.values()) + [iface]
    # Also bump updated_at, matching upsert_server's behaviour (schema has the column).
    conn.execute(
        f"UPDATE Iface SET {sets}, updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') WHERE iface=?;",
        vals)
    conn.commit()
    return f"updated: client(iface={iface}) id={cid} " + " ".join(f"{k}={changes[k]}" for k in changes)
local_address_cidr + FROM Iface + ORDER BY id; + """ + cur = conn.execute(sql) + rows = cur.fetchall() + return [ + (int(r[0]) ,str(r[1]) ,(str(r[2]) if r[2] is not None else None)) + for r in rows + ] + + +def update_iface_addresses(conn: sqlite3.Connection ,updates: Dict[int ,str]) -> int: + if not updates: + return 0 + with conn: + for iface_id ,cidr in updates.items(): + conn.execute("UPDATE Iface SET local_address_cidr=? WHERE id=?" ,(cidr ,iface_id)) + return len(updates) + + +def kernel_ipv4_cidr_for(iface: str) -> Optional[str]: + try: + cp = subprocess.run( + ["ip","-j","addr","show","dev",iface] + ,check=False + ,capture_output=True + ,text=True + ) + except Exception: + return None + if cp.returncode != 0 or not cp.stdout.strip(): + return None + try: + data = json.loads(cp.stdout) + except json.JSONDecodeError: + return None + if not isinstance(data ,list) or not data: + return None + addr_info = data[0].get("addr_info") or [] + for a in addr_info: + if a.get("family") == "inet" and a.get("scope") == "global": + local = a.get("local"); plen = a.get("prefixlen") + if local and isinstance(plen ,int): + return f"{local}/{plen}" + for a in addr_info: + if a.get("family") == "inet": + local = a.get("local"); plen = a.get("prefixlen") + if local and isinstance(plen ,int): + return f"{local}/{plen}" + return None + + +def kernel_ipv4_map(ifaces: Sequence[str]) -> Dict[str ,Optional[str]]: + return {name: kernel_ipv4_cidr_for(name) for name in ifaces} + + +def _host_ip_from_cidr(cidr: str): + try: + ipi = ipaddress.ip_interface(cidr) + except ValueError: + return None + if isinstance(ipi.ip ,ipaddress.IPv4Address): + return ipaddress.IPv4Address(int(ipi.ip)) + return None + + +def _collect_used_hosts_from(cidrs: Iterable[str] ,pool: ipaddress.IPv4Network) -> List[ipaddress.IPv4Address]: + used: List[ipaddress.IPv4Address] = [] + for c in cidrs: + hip = _host_ip_from_cidr(c) + if hip is not None and hip in pool: + used.append(hip) + return used + + +def 
_first_free_hosts( + count: int + ,used_hosts: Iterable[ipaddress.IPv4Address] + ,pool: ipaddress.IPv4Network + ,reserve_first: int = 0 +) -> List[ipaddress.IPv4Address]: + used_set = {int(h) for h in used_hosts} + result: List[ipaddress.IPv4Address] = [] + start = int(pool.network_address) + 1 + max(0 ,reserve_first) + end = int(pool.broadcast_address) - 1 + for val in range(start ,end+1): + if val not in used_set: + result.append(ipaddress.IPv4Address(val)) + if len(result) >= count: + break + if len(result) < count: + raise RuntimeError(f"address pool exhausted in {pool} (needed {count} more)") + return result + + +def plan_address_updates( + rows: Sequence[Tuple[int ,str ,Optional[str]]] + ,pool_cidr: str + ,assign_prefix: int + ,reserve_first: int + ,kmap: Dict[str ,Optional[str]] +) -> Tuple[Dict[int ,str] ,List[str]]: + notes: List[str] = [] + pool = ipaddress.IPv4Network(pool_cidr ,strict=False) + if pool.version != 4: + raise ValueError("only IPv4 pools supported") + + kernel_present = [c for c in kmap.values() if c] + db_present = [c for (_i ,_n ,c) in rows if c] + used_hosts = ( + _collect_used_hosts_from(kernel_present ,pool) + + _collect_used_hosts_from(db_present ,pool) + ) + + alloc_targets: List[Tuple[int ,str]] = [] + updates: Dict[int ,str] = {} + + for iface_id ,iface_name ,db_cidr in rows: + k_cidr = kmap.get(iface_name) + + if k_cidr: + if db_cidr != k_cidr: + updates[iface_id] = k_cidr + if db_cidr: + notes.append(f"sync: iface '{iface_name}' DB {db_cidr} -> kernel {k_cidr}") + else: + notes.append(f"sync: iface '{iface_name}' set from kernel {k_cidr}") + continue + + if db_cidr: + notes.append(f"note: iface '{iface_name}' has DB {db_cidr} but no kernel IPv4") + continue + + alloc_targets.append((iface_id ,iface_name)) + + if alloc_targets: + free = _first_free_hosts(len(alloc_targets) ,used_hosts ,pool ,reserve_first=reserve_first) + for idx ,(iface_id ,iface_name) in enumerate(alloc_targets): + cidr = f"{free[idx]}/{assign_prefix}" + 
def reconcile_kernel_and_db_ipv4_addresses(conn: sqlite3.Connection,
                                           pool_cidr: str = "10.0.0.0/16",
                                           assign_prefix: int = 32,
                                           reserve_first: int = 0,
                                           dry_run: bool = False) -> Tuple[int, List[str]]:
    """Sync Iface.local_address_cidr with the kernel, then fill gaps from a pool.

    Builds a plan via plan_address_updates() (kernel wins over DB; ifaces
    with neither get a fresh /assign_prefix host from *pool_cidr*) and,
    unless *dry_run*, writes it back.  Returns (rows_updated, notes).
    """
    rows = fetch_ifaces(conn)
    kmap = kernel_ipv4_map([name for (_iface_id, name, _cidr) in rows])

    updates, notes = plan_address_updates(rows, pool_cidr, assign_prefix, reserve_first, kmap)

    if not updates:
        return (0, notes or ["noop: nothing to change"])
    if dry_run:
        return (0, notes)
    return (update_iface_addresses(conn, updates), notes)
RT_TABLES_PATH = Path("/etc/iproute2/rt_tables")


def parse_rt_tables(path: Path) -> Tuple[List[str], Dict[str, int], Dict[int, str]]:
    """Parse an iproute2 rt_tables file.

    Returns (raw_lines, name->number, number->name).  Comment and malformed
    lines are skipped; only the FIRST occurrence of a name or of a number is
    recorded, so later duplicates cannot shadow earlier entries.  A missing
    file parses as empty.
    """
    text = path.read_text() if path.exists() else ""
    raw = text.splitlines()
    by_name: Dict[str, int] = {}
    by_num: Dict[int, str] = {}
    for line in raw:
        stripped = line.strip()
        if not stripped or stripped.startswith("#"):
            continue
        fields = stripped.split()
        if len(fields) < 2 or not fields[0].isdigit():
            continue
        num, name = int(fields[0]), fields[1]
        if name in by_name or num in by_num:
            continue  # first entry wins
        by_name[name] = num
        by_num[num] = name
    return (raw, by_name, by_num)


def first_free_id(used: Iterable[int], low: int, high: int) -> int:
    """Smallest integer in [low, high] absent from *used*; RuntimeError when full."""
    occupied = {u for u in used if low <= u <= high}
    candidate = low
    while candidate <= high:
        if candidate not in occupied:
            return candidate
        candidate += 1
    raise RuntimeError(f"no free routing-table IDs in [{low},{high}]")


def fetch_effective_ifaces(conn: sqlite3.Connection) -> List[Tuple[int, str, Optional[int]]]:
    """Rows of (Iface.id, effective table name, rt_table_id or None).

    The effective name is COALESCE(rt_table_name, iface).
    """
    sql = """
    SELECT i.id,
           COALESCE(i.rt_table_name, i.iface) AS eff_name,
           i.rt_table_id
    FROM Iface i
    ORDER BY i.id;
    """
    out: List[Tuple[int, str, Optional[int]]] = []
    for rec in conn.execute(sql).fetchall():
        out.append((int(rec[0]), str(rec[1]), int(rec[2]) if rec[2] is not None else None))
    return out
def plan_rt_id_assignments(ifaces: Sequence[Tuple[int, str, Optional[int]]],
                           name_to_num_sys: Dict[str, int],
                           existing_ids_in_db: Iterable[int],
                           low: int,
                           high: int) -> Dict[int, int]:
    """Plan rt_table_id values for Iface rows that lack one.

    Reuses the number from the system rt_tables when the effective name is
    registered there; otherwise takes the first free number in [low, high].
    Raises RuntimeError when two DB rows share an effective table name.
    Returns {iface_id: planned_rt_table_id} covering only rows needing a value.
    """
    occupied = {int(x) for x in existing_ids_in_db} | set(name_to_num_sys.values())

    # Reject ambiguous table names up front.
    owner_of: Dict[str, int] = {}
    for iface_id, eff_name, _current in ifaces:
        prior = owner_of.get(eff_name)
        if prior is not None and prior != iface_id:
            raise RuntimeError(
                f"duplicate effective table name in DB: '{eff_name}' used by Iface.id {prior} and {iface_id}"
            )
        owner_of[eff_name] = iface_id

    planned: Dict[int, int] = {}
    for iface_id, eff_name, current_id in ifaces:
        if current_id is not None:
            occupied.add(int(current_id))  # already assigned; just reserve the number
            continue
        chosen = name_to_num_sys.get(eff_name)
        if chosen is None:
            chosen = first_free_id(occupied, low, high)
        planned[iface_id] = int(chosen)
        occupied.add(int(chosen))

    return planned
,type=int ,default=29999) + ap.add_argument("--dry-run" ,action="store_true") + args = ap.parse_args(argv) + if args.low < 0 or args.high < args.low: + print(f"error: invalid range [{args.low},{args.high}]") + return 2 + with ic.open_db() as conn: + updated ,_planned ,notes = assign_missing_rt_table_ids(conn ,low=args.low ,high=args.high ,dry_run=args.dry_run) + if notes: + print("\n".join(notes)) + if not args.dry_run: + print(f"updated rows: {updated}") + return 0 + + +if __name__ == "__main__": + import sys + sys.exit(main()) diff --git a/developer/tunnel-client/db_init_route_defaults.py b/developer/tunnel-client/db_init_route_defaults.py new file mode 100644 index 0000000..857f27b --- /dev/null +++ b/developer/tunnel-client/db_init_route_defaults.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 +""" +db_init_route_defaults.py + +Business API: + seed_default_routes(conn ,iface_names ,overwrite=False ,metric=None) + -> (inserted_count ,notes[list]) + +What it does: +- For each iface in iface_names, ensure a default route "0.0.0.0/0" + is present in the Route table (on_up=1, no via/metric/table override). +- If overwrite=True, it first deletes existing Route rows for those ifaces, + then inserts the defaults. +- Writes **DB only**. It does not touch the kernel or /etc/iproute2/rt_tables. + +Why: +- Your apply script reads Route rows and emits `ip -4 route replace … table `. + Seeding a per-iface default route makes policy-routed tables usable out of the box. 
+""" + +from __future__ import annotations +import argparse +import sqlite3 +from typing import Dict ,Iterable ,List ,Optional ,Sequence ,Tuple + +# import helper to open DB when run as CLI; the business API accepts a conn +try: + import incommon as ic # type: ignore +except Exception: + ic = None # ok when used as a lib + + +def _iface_map(conn: sqlite3.Connection ,iface_names: Sequence[str]) -> Dict[str ,int]: + """Return {iface_name -> iface_id} for provided names (must exist).""" + if not iface_names: + return {} + ph = ",".join("?" for _ in iface_names) + sql = f"""SELECT id ,iface FROM Iface WHERE iface IN ({ph}) ORDER BY id;""" + rows = conn.execute(sql ,tuple(iface_names)).fetchall() + found = {str(name): int(iid) for (iid ,name) in rows} + missing = [n for n in iface_names if n not in found] + if missing: + raise RuntimeError(f"iface(s) not found: {', '.join(missing)}") + return found + + +def _existing_defaults(conn: sqlite3.Connection ,iface_ids: Iterable[int]) -> Dict[int ,bool]: + """Return {iface_id -> True/False} whether a default route row already exists (on_up=1).""" + ids = list(iface_ids) + if not ids: + return {} + ph = ",".join("?" for _ in ids) + sql = f""" + SELECT iface_id ,COUNT(1) + FROM Route + WHERE iface_id IN ({ph}) + AND cidr='0.0.0.0/0' + AND on_up=1 + GROUP BY iface_id; + """ + out: Dict[int ,bool] = {i: False for i in ids} + for iid ,cnt in conn.execute(sql ,tuple(ids)).fetchall(): + out[int(iid)] = int(cnt) > 0 + return out + + +def seed_default_routes( + conn: sqlite3.Connection + ,iface_names: Sequence[str] + ,overwrite: bool = False + ,metric: Optional[int] = None +) -> Tuple[int ,List[str]]: + """ + Upsert per-iface default routes into Route. 
+ + Inserts rows: + (iface_id ,cidr='0.0.0.0/0' ,via=NULL ,table_name=NULL ,metric= ,on_up=1 ,on_down=0) + """ + if not iface_names: + raise RuntimeError("no interfaces provided") + + id_map = _iface_map(conn ,iface_names) + iface_ids = list(id_map.values()) + notes: List[str] = [] + inserted = 0 + + with conn: + if overwrite: + ph = ",".join("?" for _ in iface_ids) + conn.execute(f"DELETE FROM Route WHERE iface_id IN ({ph});" ,tuple(iface_ids)) + notes.append(f"cleared existing Route rows for: {', '.join(iface_names)}") + + exists = _existing_defaults(conn ,iface_ids) + + for name in iface_names: + iid = id_map[name] + if exists.get(iid): + notes.append(f"keep: default route already present for {name}") + continue + conn.execute( + """ + INSERT INTO Route(iface_id ,cidr ,via ,table_name ,metric ,on_up ,on_down + ,created_at ,updated_at) + VALUES( ? ,'0.0.0.0/0' ,NULL ,NULL ,? ,1 ,0 + ,strftime('%Y-%m-%dT%H:%M:%SZ','now') ,strftime('%Y-%m-%dT%H:%M:%SZ','now')) + """ + ,(iid ,metric) + ) + inserted += 1 + notes.append(f"add: default route 0.0.0.0/0 for {name}") + + return (inserted ,notes) + + +# ---- thin CLI for ad-hoc use ---- + +def main(argv: Optional[Sequence[str]] = None) -> int: + ap = argparse.ArgumentParser(description="Seed per-iface default Route rows.") + ap.add_argument("ifaces" ,nargs="+") + ap.add_argument("--overwrite" ,action="store_true") + ap.add_argument("--metric" ,type=int ,default=None) + args = ap.parse_args(argv) + + if ic is None: + print("error: cannot locate incommon.open_db() for CLI use") + return 2 + + with ic.open_db() as conn: + n ,notes = seed_default_routes(conn ,args.ifaces ,overwrite=args.overwrite ,metric=args.metric) + if notes: + print("\n".join(notes)) + print(f"inserted: {n}") + return 0 + + +if __name__ == "__main__": + import sys + sys.exit(main()) diff --git a/developer/tunnel-client/db_init_server_US.py b/developer/tunnel-client/db_init_server_US.py new file mode 100755 index 0000000..d8cfcd0 --- /dev/null +++ 
def upsert_server(conn: sqlite3.Connection,
                  *,
                  client_iface: str,
                  server_name: str,
                  server_public_key: str,
                  endpoint_host: str,
                  endpoint_port: int,
                  allowed_ips: str,
                  preshared_key: Optional[str] = None,
                  keepalive_s: Optional[int] = None,
                  route_allowed_ips: int = 0,
                  priority: int = 100) -> str:
    """Insert or update the server (peer) row bound to *client_iface*.

    The row is keyed by (iface_id, name).  On update only the columns that
    actually differ are rewritten, and updated_at is bumped.
    Returns 'seeded: …', 'updated: …' or 'ok: … unchanged'.
    """
    cid = ic.get_client_id(conn, client_iface)

    existing = conn.execute(
        "SELECT id, public_key, preshared_key, endpoint_host, endpoint_port, allowed_ips, "
        " keepalive_s, route_allowed_ips, priority "
        "FROM server WHERE iface_id=? AND name=? LIMIT 1;",
        (cid, server_name),
    ).fetchone()

    desired = {
        "public_key": server_public_key,
        "preshared_key": preshared_key,
        "endpoint_host": endpoint_host,
        "endpoint_port": endpoint_port,
        "allowed_ips": allowed_ips,
        "keepalive_s": keepalive_s,
        "route_allowed_ips": route_allowed_ips,
        "priority": priority,
    }

    if existing is None:
        insert_sql = (
            "INSERT INTO server (iface_id,name,public_key,preshared_key,"
            " endpoint_host,endpoint_port,allowed_ips,keepalive_s,route_allowed_ips,priority,"
            " created_at,updated_at) "
            "VALUES (?,?,?,?,?,?,?,?,?,?, strftime('%Y-%m-%dT%H:%M:%SZ','now'), strftime('%Y-%m-%dT%H:%M:%SZ','now'));"
        )
        cur = conn.execute(insert_sql, (cid, server_name,
                                        desired["public_key"], desired["preshared_key"],
                                        desired["endpoint_host"], desired["endpoint_port"],
                                        desired["allowed_ips"], desired["keepalive_s"],
                                        desired["route_allowed_ips"], desired["priority"]))
        conn.commit()
        return f"seeded: server(name={server_name}) client={client_iface} id={cur.lastrowid}"

    sid, pub, psk, host, port, allow, ka, route_ai, prio = existing
    current = {
        "public_key": pub, "preshared_key": psk, "endpoint_host": host, "endpoint_port": port,
        "allowed_ips": allow, "keepalive_s": ka, "route_allowed_ips": route_ai, "priority": prio,
    }
    changes: Dict[str, Any] = {k: v for k, v in desired.items() if v != current.get(k)}
    if not changes:
        return f"ok: server(name={server_name}) client={client_iface} unchanged id={sid}"

    assignments = ", ".join(f"{k}=?" for k in changes)
    conn.execute(
        f"UPDATE server SET {assignments}, updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') "
        "WHERE iface_id=? AND name=?;",
        list(changes.values()) + [cid, server_name],
    )
    conn.commit()
    return f"updated: server(name={server_name}) client={client_iface} id={sid} " + " ".join(f"{k}={changes[k]}" for k in changes)
1002, unused + ,rt_table_name TEXT -- if NULL, default to iface (see view) + -- legacy caches (kept for compatibility; may be NULL) + ,bound_user TEXT + ,bound_uid INTEGER + ,local_address_cidr TEXT -- e.g. '10.8.0.2/32' + -- secrets: private key is NO LONGER stored in DB (lives under key/) + ,public_key TEXT CHECK (public_key IS NULL OR length(public_key) BETWEEN 43 AND 45) + ,mtu INTEGER + ,fwmark INTEGER + ,dns_mode TEXT NOT NULL DEFAULT 'none' CHECK (dns_mode IN ('none','static')) + ,dns_servers TEXT + ,autostart INTEGER NOT NULL DEFAULT 0 +); + +-- Server (one or more remote peers for an Iface) +CREATE TABLE IF NOT EXISTS Server ( + id INTEGER PRIMARY KEY + ,created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) + ,updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) + ,iface_id INTEGER NOT NULL REFERENCES Iface(id) ON DELETE CASCADE + ,name TEXT NOT NULL -- e.g. 'x6', 'US' + ,public_key TEXT NOT NULL CHECK (length(public_key) BETWEEN 43 AND 45) + ,preshared_key TEXT CHECK (preshared_key IS NULL OR length(preshared_key) BETWEEN 43 AND 45) + ,endpoint_host TEXT NOT NULL + ,endpoint_port INTEGER NOT NULL CHECK (endpoint_port BETWEEN 1 AND 65535) + ,allowed_ips TEXT NOT NULL -- typically '0.0.0.0/0' + ,keepalive_s INTEGER + ,route_allowed_ips INTEGER NOT NULL DEFAULT 1 + ,priority INTEGER NOT NULL DEFAULT 100 + ,UNIQUE(iface_id, name) +); + +-- Route (optional extra routes applied by post-up script) +CREATE TABLE IF NOT EXISTS Route ( + id INTEGER PRIMARY KEY + ,created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) + ,updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) + ,iface_id INTEGER NOT NULL REFERENCES Iface(id) ON DELETE CASCADE + ,cidr TEXT NOT NULL + ,via TEXT + ,table_name TEXT + ,metric INTEGER + ,on_up INTEGER NOT NULL DEFAULT 1 + ,on_down INTEGER NOT NULL DEFAULT 0 +); + +-- User (many linux users → one Iface) +-- each user is bound to an iface via an 'ip rule add uidrange ..' 
command +CREATE TABLE IF NOT EXISTS User ( + id INTEGER PRIMARY KEY + ,created_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) + ,updated_at TEXT NOT NULL DEFAULT (strftime('%Y-%m-%dT%H:%M:%SZ','now')) + ,iface_id INTEGER NOT NULL REFERENCES Iface(id) ON DELETE CASCADE + ,username TEXT NOT NULL + ,uid INTEGER -- cached UID if resolved + ,UNIQUE(iface_id, username) +); + +-- Effective view (provides computed defaults like rt_table_name_eff) +CREATE VIEW IF NOT EXISTS v_iface_effective AS +SELECT + i.id + ,i.iface + ,COALESCE(i.rt_table_name, i.iface) AS rt_table_name_eff + ,i.local_address_cidr +FROM Iface i; + +-- mtime triggers +CREATE TRIGGER IF NOT EXISTS trg_iface_mtime +AFTER UPDATE ON Iface FOR EACH ROW +BEGIN + UPDATE Iface + SET updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') + WHERE id=NEW.id; +END; + +CREATE TRIGGER IF NOT EXISTS trg_server_mtime +AFTER UPDATE ON Server FOR EACH ROW +BEGIN + UPDATE Server + SET updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') + WHERE id=NEW.id; +END; + +CREATE TRIGGER IF NOT EXISTS trg_route_mtime +AFTER UPDATE ON Route FOR EACH ROW +BEGIN + UPDATE Route + SET updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') + WHERE id=NEW.id; +END; + +CREATE TRIGGER IF NOT EXISTS trg_user_binding_mtime +AFTER UPDATE ON User FOR EACH ROW +BEGIN + UPDATE User + SET updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') + WHERE id=NEW.id; +END; diff --git a/developer/tunnel-client/db_schema_load.sh b/developer/tunnel-client/db_schema_load.sh new file mode 100755 index 0000000..d4718bf --- /dev/null +++ b/developer/tunnel-client/db_schema_load.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# db_init.sh — create/upgrade db/store by loading schema.sql (idempotent) + +set -euo pipefail +DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" +DB="$DIR/db/store" +SCHEMA="$DIR/db_schema.sql" + +command -v sqlite3 >/dev/null || { echo "❌ sqlite3 not found"; exit 1; } +[[ -f "$SCHEMA" ]] || { echo "❌ schema file missing: $SCHEMA"; exit 1; } + 
+if [[ -f "$DB" ]]; then + ts="$(date -u +%Y%m%dT%H%M%SZ)" + cp -f -- "$DB" "$DB.bak-$ts" + echo "↩︎ Backed up existing DB to $DB.bak-$ts" +fi + +sqlite3 -cmd '.bail on' "$DB" < "$SCHEMA" + +ver="$(sqlite3 "$DB" 'PRAGMA user_version;')" +echo "✔ DB ready: $DB (user_version=$ver)" +echo " Tables:" +sqlite3 -noheader -list "$DB" "SELECT name FROM sqlite_master WHERE type='table' ORDER BY name;" diff --git a/developer/tunnel-client/db_wipe.py b/developer/tunnel-client/db_wipe.py new file mode 100755 index 0000000..d0eb4ec --- /dev/null +++ b/developer/tunnel-client/db_wipe.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python3 +""" +db_wipe.py + +Remove regular (non-directory) files in ./db, keeping the directory. + +Safety +- Refuses to run if the target directory does not exist or its basename is not exactly "db". +- Prints a plan, then asks "Are you sure? [y/N]" unless --force is used. +- --dry-run prints what would be removed without deleting. +- Hidden files (names starting with '.') are preserved by default; use --include-hidden to delete them too. + +Usage + ./db_wipe.py # plan + prompt, non-hidden files only, ./db next to this script + ./db_wipe.py --force # no prompt + ./db_wipe.py --dry-run # show what would be deleted + ./db_wipe.py --include-hidden + ./db_wipe.py --db /path/to/db +""" + +from __future__ import annotations +from pathlib import Path +from typing import Iterable, List, Tuple +import argparse +import sys +import os + +# ---------- business ---------- + +def plan_db_wipe(db_dir: Path, include_hidden: bool = False) -> List[Path]: + """ + Return a sorted list of file Paths (depth=1) to delete from db_dir. 
+ """ + if not db_dir.exists(): + raise FileNotFoundError(f"not found: {db_dir}") + if not db_dir.is_dir(): + raise NotADirectoryError(f"not a directory: {db_dir}") + if db_dir.name != "db": + raise RuntimeError(f"expected directory named 'db', got: {db_dir.name}") + + def _is_hidden(p: Path) -> bool: + return p.name.startswith(".") + + files = [p for p in db_dir.iterdir() if p.is_file()] + if not include_hidden: + files = [p for p in files if not _is_hidden(p)] + + # Sort by name for stable output + return sorted(files, key=lambda p: p.name) + + +def wipe_db( + db_dir: Path, + include_hidden: bool = False, + dry_run: bool = False, + assume_yes: bool = False, + _prompt_fn=input, +) -> Tuple[int, List[str]]: + """ + Delete planned files from db_dir. Returns (deleted_count, logs). + Does not prompt if assume_yes=True or dry_run=True. + """ + targets = plan_db_wipe(db_dir, include_hidden=include_hidden) + + logs: List[str] = [] + script_dir = Path(__file__).resolve().parent + + if not targets: + logs.append(f"db_wipe: no matching files in: {db_dir.relative_to(script_dir)}") + return (0, logs) + + logs.append("db_wipe: plan") + for p in targets: + # Show path relative to script directory like the original + rel = p.resolve().relative_to(script_dir) + logs.append(f" delete: {rel}") + + if dry_run: + logs.append("db_wipe: dry-run; no changes made") + return (0, logs) + + if not assume_yes: + print("\n".join(logs)) + try: + ans = _prompt_fn("Are you sure? 
[y/N] ").strip().lower() + except EOFError: + ans = "" + if ans not in ("y", "yes"): + logs.append("db_wipe: aborted") + return (0, logs) + + deleted = 0 + for p in targets: + try: + p.unlink(missing_ok=True) # py3.8+: if not available, catch FileNotFoundError + deleted += 1 + except FileNotFoundError: + # Equivalent to rm -f + pass + + rel_db = db_dir.resolve().relative_to(script_dir) + logs.append(f"db_wipe: deleted {deleted} file(s) from {rel_db}") + return (deleted, logs) + + +# ---------- CLI wrapper ---------- + +def _default_db_dir() -> Path: + return Path(__file__).resolve().parent / "db" + +def main(argv: list[str] | None = None) -> int: + ap = argparse.ArgumentParser(description="Remove regular files in ./db, keeping the directory.") + ap.add_argument("--db", default=str(_default_db_dir()), help="path to the db directory (default: ./db next to this script)") + ap.add_argument("--force", action="store_true", help="do not prompt for confirmation") + ap.add_argument("--dry-run", action="store_true", help="print what would be removed without deleting") + ap.add_argument("--include-hidden", action="store_true", help="include dotfiles (e.g., .gitignore)") + args = ap.parse_args(argv) + + db_dir = Path(args.db) + + try: + deleted, logs = wipe_db( + db_dir=db_dir, + include_hidden=args.include_hidden, + dry_run=args.dry_run, + assume_yes=args.force or args.dry_run, + ) + if logs: + print("\n".join(logs)) + return 0 + except (FileNotFoundError, NotADirectoryError, RuntimeError) as e: + print(f"❌ {e}", file=sys.stderr) + return 1 + except Exception as e: + print(f"❌ unexpected error: {e}", file=sys.stderr) + return 2 + +if __name__ == "__main__": + sys.exit(main()) diff --git a/developer/tunnel-client/deploy_StanleyPark.py b/developer/tunnel-client/deploy_StanleyPark.py new file mode 100755 index 0000000..933311c --- /dev/null +++ b/developer/tunnel-client/deploy_StanleyPark.py @@ -0,0 +1,216 @@ +#!/usr/bin/env python3 +""" +deploy_StanleyPark.py — stop → install 
staged files → start (for selected ifaces) + +- Requires root. Exits after reporting *all* detected CLI/import errors. +- Calls business functions directly: + * stop_clean_iface.stop_clean_ifaces(ifaces) + * install_staged_tree.install_staged_tree(stage_root, dest_root, create_dirs, skip_identical) + * start_iface.start_ifaces(ifaces) +- If no ifaces provided on CLI, it discovers them from the stage tree. + +Usage: + sudo ./deploy_StanleyPark.py # discover ifaces from stage, stop→install→start + sudo ./deploy_StanleyPark.py x6 US # explicit iface list + sudo ./deploy_StanleyPark.py --no-stop # skip stop step + sudo ./deploy_StanleyPark.py --no-start # skip start step + sudo ./deploy_StanleyPark.py --stage ./stage --root / --create-dirs +""" + +from __future__ import annotations +from pathlib import Path +from typing import List, Sequence, Tuple +import argparse +import os +import sys +import traceback + +ROOT = Path(__file__).resolve().parent +sys.path.insert(0, str(ROOT)) # ensure sibling modules importable + +# --- lightweight staged-iface discovery (duplicated here to avoid importing internals) --- +def _discover_ifaces_from_stage(stage_root: Path) -> List[str]: + names = set() + # from /etc/wireguard/.conf + wg_dir = stage_root / "etc" / "wireguard" + if wg_dir.is_dir(): + for p in wg_dir.glob("*.conf"): + names.add(p.stem) + # from /etc/systemd/system/wg-quick@.service.d/ + sysd = stage_root / "etc" / "systemd" / "system" + if sysd.is_dir(): + for d in sysd.glob("wg-quick@*.service.d"): + nm = d.name # wg-quick@IFACE.service.d + at = nm.find("@") + dot = nm.find(".service.d") + if at != -1 and dot != -1 and dot > at: + names.add(nm[at+1:dot]) + return sorted(names) + +def _is_root() -> bool: + try: + return os.geteuid() == 0 + except AttributeError: + # Non-POSIX: best effort + return False + +def _validate_iface_name(n: str) -> bool: + # conservative: letters, digits, dash, underscore (WireGuard allows more, but keep it safe) + import re + return 
bool(re.fullmatch(r"[A-Za-z0-9_-]{1,32}", n)) + +def _collect_errors(args) -> Tuple[List[str], List[str]]: + """ + Return (errors, ifaces). Does *not* raise. + """ + errors: List[str] = [] + + # Root required + if not _is_root(): + errors.append("must be run as root (sudo)") + + # Stage root + stage_root = Path(args.stage) + if not stage_root.exists(): + errors.append(f"stage path does not exist: {stage_root}") + + # Import modules + inst_mod = None + stop_mod = None + start_mod = None + try: + import install_staged_tree as inst_mod # type: ignore + except Exception as e: + errors.append(f"failed to import install_staged_tree: {e}") + try: + import stop_clean_iface as stop_mod # type: ignore + except Exception as e: + errors.append(f"failed to import stop_clean_iface: {e}") + try: + import start_iface as start_mod # type: ignore + except Exception as e: + errors.append(f"failed to import start_iface: {e}") + + # Business functions existence (only if imports worked) + if inst_mod is not None and not hasattr(inst_mod, "install_staged_tree"): + errors.append("install_staged_tree module missing function: install_staged_tree") + if stop_mod is not None and not hasattr(stop_mod, "stop_clean_ifaces"): + errors.append("stop_clean_iface module missing function: stop_clean_ifaces") + if start_mod is not None and not hasattr(start_mod, "start_ifaces"): + errors.append("start_iface module missing function: start_ifaces") + + # Ifaces + ifaces: List[str] + if args.ifaces: + ifaces = list(dict.fromkeys(args.ifaces)) # dedup preserve order + else: + ifaces = _discover_ifaces_from_stage(stage_root) + if not ifaces: + errors.append("no interfaces provided and none discovered from stage") + else: + bad = [n for n in ifaces if not _validate_iface_name(n)] + if bad: + errors.append(f"invalid iface name(s): {', '.join(bad)}") + + return (errors, ifaces) + +def deploy_StanleyPark( + ifaces: Sequence[str], + stage_root: Path, + dest_root: Path, + create_dirs: bool, + skip_identical: 
bool, + do_stop: bool, + do_start: bool, +) -> int: + """ + Orchestration: stop (optional) → install → start (optional). + """ + # Late imports so unit tests can monkeypatch easily + import install_staged_tree as inst + import stop_clean_iface as stopm + import start_iface as startm + + print(f"Deploy plan:\n ifaces: {', '.join(ifaces)}\n stage: {stage_root}\n root: {dest_root}\n") + + # Stop + if do_stop: + print(f"Stopping: {' '.join(ifaces)}") + try: + stop_logs = stopm.stop_clean_ifaces(ifaces) + if isinstance(stop_logs, (list, tuple)): + for line in stop_logs: + print(line) + except Exception: + print("warn: stop_clean_ifaces raised an exception (continuing):") + traceback.print_exc() + + # Install + print("\nInstalling staged artifacts…") + try: + logs, detected = inst.install_staged_tree( + stage_root=stage_root, + dest_root=dest_root, + create_dirs=create_dirs, + skip_identical=skip_identical, + ) + for line in logs: + print(line) + except Exception: + print("❌ install failed with exception:", file=sys.stderr) + traceback.print_exc() + return 2 + + # Start + if do_start: + # Prefer explicit ifaces; fall back to what installer detected + start_list = list(ifaces) if ifaces else list(detected) + if not start_list: + print("\nNo interfaces to start (none detected).") + else: + print(f"\nStarting: {' '.join(start_list)}") + try: + start_logs = startm.start_ifaces(start_list) + if isinstance(start_logs, (list, tuple)): + for line in start_logs: + print(line) + except Exception: + print("warn: start_ifaces raised an exception:", file=sys.stderr) + traceback.print_exc() + return 2 + + print("\n✓ Deploy complete.") + return 0 + +def main(argv: List[str] | None = None) -> int: + ap = argparse.ArgumentParser(description="Deploy staged WG artifacts for StanleyPark (stop→install→start).") + ap.add_argument("ifaces", nargs="*", help="interfaces to manage (default: discover from stage)") + ap.add_argument("--stage", default=str(ROOT / "stage"), help="stage root (default: 
./stage)") + ap.add_argument("--root", default="/", help="destination root (default: /)") + ap.add_argument("--create-dirs", action="store_true", help="create missing parent directories") + ap.add_argument("--no-skip-identical", action="store_true", help="always replace even if content identical") + ap.add_argument("--no-stop", action="store_true", help="do not stop interfaces before install") + ap.add_argument("--no-start", action="store_true", help="do not start interfaces after install") + args = ap.parse_args(argv) + + # Collect all errors up front + errors, ifaces = _collect_errors(args) + if errors: + print("❌ deploy preflight found issue(s):", file=sys.stderr) + for e in errors: + print(f" - {e}", file=sys.stderr) + return 2 + + # Proceed + return deploy_StanleyPark( + ifaces=ifaces, + stage_root=Path(args.stage), + dest_root=Path(args.root), + create_dirs=args.create_dirs, + skip_identical=(not args.no_skip_identical), + do_stop=(not args.no_stop), + do_start=(not args.no_start), + ) + +if __name__ == "__main__": + sys.exit(main()) diff --git a/developer/tunnel-client/deprecated/.gitignore b/developer/tunnel-client/deprecated/.gitignore new file mode 100644 index 0000000..53642ce --- /dev/null +++ b/developer/tunnel-client/deprecated/.gitignore @@ -0,0 +1,4 @@ + +* +!.gitignore + diff --git a/developer/tunnel-client/doc_IP_terminaology.org b/developer/tunnel-client/doc_IP_terminaology.org new file mode 100644 index 0000000..8f6587b --- /dev/null +++ b/developer/tunnel-client/doc_IP_terminaology.org @@ -0,0 +1,98 @@ +#+TITLE: Interface vs Link vs Netdevice: a cynical field guide +#+AUTHOR: Thomas & Nerith (session) +#+LANGUAGE: en +#+OPTIONS: toc:2 num:t + +* TL;DR +In Linux networking (and in this doc), /interface/, /link/, and /netdevice/ can all refer to the same kernel object, e.g., wg0, x6, eth0. This conflation of terms came about because different tribes named the same thing differently. 
+ +* What these words actually refer to +- interface: common admin usage for referring to said kernel network object. +- link: iproute2's vocabulary for said kernel network object (as in the command: `ip link show ` which gives information about said kernel network object). +- netdevice: the kernel's term (struct net_device under the hood) + +* Where the words come from +- Kernel engineers: /netdevice/ is the internal type that packets touch. +- iproute2 authors: named their subcommands by subsystem; the L2-ish one is /link/. Hence ip link, ip addr, ip route, ip rule, ip neigh. +- Humans: kept saying /interface/ because that was the word from ifconfig days and textbooks. + +* Cynic's guide to commands (map the terrain) +- ip link show x6 → show properties of interface x6 (state, mtu, type, flags); not L3 addresses or routes (here /link/ == /interface/) +- ip addr add A dev x6 → attach IP address A as a property of interface x6; this alone does not force source choice or egress path (here /dev/ = /interface/) +- ip route add dev x6 → write a route entry: map destination → interface x6 (here /dev/ = /interface/) +- ip rule add ... → write a policy rule that selects which routing table to consult +- ip neigh ... → view/manage the neighbor cache (ARP/ND) per interface; maps L3 neighbor → L2 address; not routing + + +* Device + +In computing, a /device/ is a piece of hardware. This includes printers, disk drives, memory cards, NIC cards, etc. An emulated device is software that is written to do the same thing as an actual device. This is sometimes done when compatibility with an old device is needed, but the old device is not available. A virtual device is software that is written to do the same thing as an imagined device. This is sometimes done to make available features that no physical device provides. 
A virtual device can also be state that is kept to support multiplexing a real device among many users, while giving each user the appearance of having sole ownership of said device. It is also common to call a device emulator a virtual device. + +In Unix operating systems, special files are used for interfacing to devices. Such an interface is often called a /device file/, which inevitably gets shortened to /device/. + +In networking, the kernel keeps state data for a device, and software drivers for shipping data to and from a device used for networking. Such software objects are often called /network devices/. The interface to the kernel used for talking to devices inevitably gets called a /device/. + +The terms /physical device/, /device file/, and /netdevice/ are used to distinguish among the various possible meanings of /device/. We observe that generally terminology suffers due to a desire to flatten and thus simplify the discussion of the communication abstraction stack. + +* Interface + +An /interface/ is a shared surface between two systems. A user interface is the shared surface between a user and a system. E.g. the dashboard of a car is a user interface to the car. + +In software programming, an interface is a set of data and routines used for communication between software systems. For example, an API is an application programming interface. + +The OS provides named interfaces for communicating with network devices. Within the context of network programming, the literature will refer to such an interface as the /device/, /link/, or /interface/, the latter being the only term fitting the wider-scope conventional definition. + +* Link + +A /link/ is a pathway that connects two systems. With an interface, there is no link, as the systems touch. A link has two interfaces, one on each end. Hence it was inevitable that a link interface would be called a /link/. And if the link connects to a device, then that link interface itself gets called a /device/. 
+ +In iproute2 /link/ means the local endpoint object. Do not assume a remote counterpart exists just because you saw the word /link/. + +* Machine Peers + +- Client + +In these documents, the client machine is the local machine users are working on. Inevitably this gets shortened to /client/ in polite conversation. The example client used in this distribution is StanleyPark. That is a host name of a computer on our network. + +- Server + +In these documents, the server machine is the remote machine that the WireGuard tunnels to. We have nicknames for machines. The example used here has the server nicknames of x6 and US. +These nicknames are also used for the names of the client machine side interface that connects to the tunnel that leads to said server machine. The nickname is also used for the name of the routing table on the client that routes traffic to said wireguard tunnel. + +Hence, a nickname, like x6 or US, refers to a machine, an interface, and an IP route table. + +* Software Peers + +Programs that run as daemons while listening for connections, and once connected to, they provide services, are server programs. The program that connects to said software server is called a client program. You guessed it, the terms 'server program' and 'client program' often get shortened to /server/ and /client/. + + +* WireGuard mini-map +We will use this terminology: + +- We will consider that WireGuard is conceptually a virtual device. +- There can be many interfaces to said WireGuard device, taking names like wg0 or x6. Each has a keypair, a listen port, and a set of peers. +- Config tools: "wg" (CLI, not a daemon), "wg-quick" (oneshot helper per interface). 
+- Reality check: + - ip link show type wireguard → lists all WG interfaces + - ip -d link show x6 → detailed information about the x6 interface + - wg show x6 → peer/crypto state for the x6 interface + +* Sanity tests you can run +#+begin_src sh +# list all WireGuard interfaces +ip link show type wireguard + +# detailed view of one interface +ip -d link show x6 + +# see handshake and byte counters +wg show x6 + +# show L3 addresses bound to an interface +ip addr show dev x6 + +# show routes in a named table (if you use policy routing) +ip route show table x6 +#+end_src + + diff --git a/developer/tunnel-client/doc_StanleyPark.org b/developer/tunnel-client/doc_StanleyPark.org new file mode 100644 index 0000000..292ec21 --- /dev/null +++ b/developer/tunnel-client/doc_StanleyPark.org @@ -0,0 +1,51 @@ + +1. create/update the client configuration files. + + These are the configuration files for the machine called StanleyPark, which is on + our local network. (Yes, we capitalize proper nouns, and thus have some "bad names".) + + db_init_StanleyPark.py + stage_StanleyPark + deploy_StanleyPark + + They are in Python. + +2. Wipe the database and the stage. + + Wiping the db will erase keys and any other client configurations. This does not affect already installed configuration files. Also, the database can always be rebuilt by running the client configuration files again. + + ./db_wipe.py + ./stage_wipe.py + +3. Setup the database + + ./db_init_StanleyPark + +4. setup the keys + + ./key_generate StanleyPark.py + ./key_server_set.py + + to see the keys in the database + + ./ls_key.py + + if the database was wiped, it will be necessary to key_generate again. Currently + there is one client machine key pair. + +5. stage the configuration files to be installed + + ./stage_StanleyPark + + check them to make sure they are what you want + +6. 
install the staged files + + ./deploy_StanleyPark + + +The goal here is to work towards each subu as a container, with its networking tunneled +to the specified interface. Perhaps the configuration scripts should be subu based instead of client machine based. Perhaps in the next version. + + + diff --git a/developer/tunnel-client/doc_config.org b/developer/tunnel-client/doc_config.org new file mode 100644 index 0000000..2de0ee4 --- /dev/null +++ b/developer/tunnel-client/doc_config.org @@ -0,0 +1,9 @@ +-New interface: + +copy `db_init_iface_x6.py` to `db_init_iface_<iface>.py`, replacing <iface> with the name of the interface. Then edit `db_init_iface_<iface>.py` + +-New Client + +-New User + + diff --git a/developer/tunnel-client/doc_keys.org b/developer/tunnel-client/doc_keys.org new file mode 100644 index 0000000..e56bd76 --- /dev/null +++ b/developer/tunnel-client/doc_keys.org @@ -0,0 +1,14 @@ + +From the point of view of setting up the client (we are in the client setup directory after all): + +1. login to the server and get the server public key. + + This public key is written into the db_init_iface_<iface>.py configuration file. Note the examples `db_init_iface_US.py` and `db_init_iface_x6`. `x6` and `US` are nicknames for two servers. These nicknames are also used for the interface names. + + Note that the server private key remains on the server. The client has no knowledge of the server private key. It is not entered anywhere in the client configuration. + +2. run the program `key_client_generate` + + This will print the client public key. It will also place a copy in the database. + + This will write the client private key into a local directory called `key/`. The admin need not do anything concerning this key. Scripts that need it will find it in the 'key/' directory. 
diff --git a/developer/tunnel-client/doc_stage_progs.org b/developer/tunnel-client/doc_stage_progs.org new file mode 100644 index 0000000..a80f789 --- /dev/null +++ b/developer/tunnel-client/doc_stage_progs.org @@ -0,0 +1,42 @@ + +stage programs write to the stage directory. Later install copies from the stage +directory to a provided root, which if it is the local machine, will be '/'. + + +* stage_IP_register_route_table.py + + stages a replacement etc/iproute2/rt_tables file. + +* stage_wg_conf.py + + stages etc/wireguard/ conf files for the configured interfaces + +* stage_IP_routes_script.py + + 1. stages a shell script that when called writes the IP rule table. Said script binds UIDs to route tables. + + 2. stages a priority 10 systemd guard systemd dropin that will call said shell script when + WireGuard is started or restarted. + +* stage_IP_rules_script.py + + 1. stages a shell script that when called writes the required IP route tables + + 2. stages a priority 20 systemd guard systemd dropin that will call said shell script when + WireGuard is started or restarted. + +* stage_client_StanleyPark.py + + A local use client machine configuration file. Calls the other stage programs + while providing the correct parameters for configuring wireguard on the + machine StanleyPark. Typically these will be a database connection and a list of + users. + + The admin will write such a file for each machine he/she/ai is configuring. + +* stage_incommon.py + + Utility functions for stage programs. 
+ + + diff --git a/developer/tunnel-client/iface_down.py b/developer/tunnel-client/iface_down.py new file mode 100755 index 0000000..a1e6474 --- /dev/null +++ b/developer/tunnel-client/iface_down.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +# iface_down.py — stop wg-quick@ and remove uid→rt rules + +from __future__ import annotations +import os, sys, sqlite3, subprocess +import incommon as ic # provides open_db() + +def sh(args: list[str], check: bool=False) -> subprocess.CompletedProcess: + return subprocess.run(args, text=True, capture_output=True, check=check) + +def get_rt_table_name(conn: sqlite3.Connection, iface: str) -> str: + row = conn.execute( + "SELECT rt_table_name_eff FROM v_client_effective WHERE iface=? LIMIT 1;", + (iface,) + ).fetchone() + if not row: + raise RuntimeError(f"Interface not found in DB: {iface}") + return str(row[0]) + +def get_bound_uids(conn: sqlite3.Connection, iface: str) -> list[int]: + rows = conn.execute( + """SELECT ub.uid + FROM User ub + JOIN Iface c ON c.id = ub.iface_id + WHERE c.iface=? 
AND ub.uid IS NOT NULL + ORDER BY ub.uid;""", + (iface,) + ).fetchall() + return [int(r[0]) for r in rows] + +def iface_down(iface: str) -> str: + if os.geteuid() != 0: + raise PermissionError("This script must be run as root.") + + # Stop interface (ignore failure) + sh(["systemctl", "stop", f"wg-quick@{iface}"]) + + # DB lookups + with ic.open_db() as conn: + table = get_rt_table_name(conn, iface) + uids = get_bound_uids(conn, iface) + + # Snapshot rules once for existence checks + rules = sh(["ip", "-4", "rule", "list"]).stdout + + removed = 0 + for uid in uids: + needle = f"uidrange {uid}-{uid} " + if needle in rules and f" lookup {table}" in rules: + # Try to delete; ignore failure to keep idempotence + sh(["ip", "-4", "rule", "del", "uidrange", f"{uid}-{uid}", "table", table]) + sh(["logger", f"iface_down: removed uid {uid} rule for table {table}"]) + removed += 1 + + return f"✅ {iface} stopped; removed {removed} uid rules from table {table}." + +def main(argv: list[str]) -> int: + if len(argv) != 1: + print(f"Usage: {os.path.basename(sys.argv[0])} <iface>", file=sys.stderr) + return 2 + iface = argv[0] + try: + msg = iface_down(iface) + except (PermissionError, FileNotFoundError, sqlite3.Error, RuntimeError) as e: + print(f"❌ {e}", file=sys.stderr); return 1 + print(msg); return 0 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/developer/tunnel-client/iface_status.py b/developer/tunnel-client/iface_status.py new file mode 100755 index 0000000..c0a12e9 --- /dev/null +++ b/developer/tunnel-client/iface_status.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +# iface_status.py — show unit/wg/route/uid-rule status for <iface> + +from __future__ import annotations +import os, sys, shutil, sqlite3, subprocess, time +from pathlib import Path +import incommon as ic # provides open_db() + +# --- small shell helpers ----------------------------------------------------- + +def sh(args: list[str]) -> subprocess.CompletedProcess: + """Run command; never raise; 
text mode; capture stdout/stderr.""" + return subprocess.run(args, text=True, capture_output=True) + +def which(cmd: str) -> bool: + return shutil.which(cmd) is not None + +def print_block(title: str, body: str | None = None) -> None: + print(f"=== {title} ===") + if body is not None and body != "": + print(body.rstrip()) + print() + +# --- DB helpers --------------------------------------------------------------- + +def get_rt_table_name(conn: sqlite3.Connection, iface: str) -> str: + row = conn.execute( + "SELECT rt_table_name_eff FROM v_client_effective WHERE iface=? LIMIT 1;", + (iface,) + ).fetchone() + if not row: + raise RuntimeError(f"Interface not found in DB: {iface}") + return str(row[0]) + +def get_bound_users(conn: sqlite3.Connection, iface: str) -> list[tuple[str, int | None]]: + rows = conn.execute( + """SELECT ub.username, ub.uid + FROM User ub + JOIN Iface c ON c.id = ub.iface_id + WHERE c.iface=? + ORDER BY ub.username;""", + (iface,) + ).fetchall() + return [(str(u), (None if v is None else int(v))) for (u, v) in rows] + +# --- core -------------------------------------------------------------------- + +def iface_status(iface: str) -> int: + # DB open + resolve table name early for helpful errors + with ic.open_db() as conn: + table = get_rt_table_name(conn, iface) + + # systemd status + en = sh(["systemctl", "is-enabled", f"wg-quick@{iface}"]) + ac = sh(["systemctl", "is-active", f"wg-quick@{iface}"]) + sys_body = "\n".join([ + (en.stdout.strip() if en.stdout.strip() else "").strip(), + (ac.stdout.strip() if ac.stdout.strip() else "").strip(), + ]).strip() + print_block(f"systemd: wg-quick@{iface}", sys_body) + + # wg presence + handshake age + wg_title = f"wg: {iface}" + if which("wg"): + if Path(f"/sys/class/net/{iface}").exists(): + lines: list[str] = ["(present)"] + # Try sudo-less handshake read; if not permitted, show hint + hs_try = sh(["sudo", "-n", "wg", "show", iface, "latest-handshakes"]) + if hs_try.returncode == 0 and 
hs_try.stdout.strip(): + # expected format: " " + epoch_part = hs_try.stdout.strip().split()[-1] + try: + hs = int(epoch_part) + if hs > 0: + age = int(time.time()) - hs + lines.append(f"latest-handshake: {age}s ago") + else: + lines.append("latest-handshake: none") + except ValueError: + lines.append("latest-handshake: unknown") + else: + prog = Path(sys.argv[0]).name or "iface_status.py" + lines.append(f"⚠ need sudo to read peers/handshake (try: sudo {prog} {iface})") + print_block(wg_title, "\n".join(lines)) + else: + print_block(wg_title, "(interface down or not present)") + else: + print_block(wg_title, "wg tool not found.") + + # route for table + rt = sh(["ip", "-4", "route", "show", "table", table]) + print_block(f"route: table {table}", rt.stdout if rt.stdout else "") + + # uid rules targeting table + rules = sh(["ip", "-4", "rule", "show"]).stdout.splitlines() + hits = [ln for ln in rules if f"lookup {table}" in ln] + print_block(f"uid rules → table {table}", "\n".join(hits) if hits else "(none)") + + # DB: bound users + with ic.open_db() as conn: + bound = get_bound_users(conn, iface) + + if not bound: + print_block(f"DB: bound users for {iface}", "(none)") + else: + # simple column render + header = ("username", "uid") + rows = [(u, ("" if v is None else str(v))) for (u, v) in bound] + w1 = max(len(header[0]), *(len(r[0]) for r in rows)) + w2 = max(len(header[1]), *(len(r[1]) for r in rows)) + body_lines = [f"{header[0]:<{w1}} {header[1]:<{w2}}", + f"{'-'*w1} {'-'*w2}"] + body_lines += [f"{u:<{w1}} {v:<{w2}}" for (u, v) in rows] + print_block(f"DB: bound users for {iface}", "\n".join(body_lines)) + + return 0 + +# --- cli --------------------------------------------------------------------- + +def main(argv: list[str]) -> int: + if len(argv) != 1: + print(f"Usage: {Path(sys.argv[0]).name} ", file=sys.stderr) + return 2 + try: + return iface_status(argv[0]) + except (sqlite3.Error, FileNotFoundError, RuntimeError) as e: + print(f"❌ {e}", file=sys.stderr) 
+ return 1 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/developer/tunnel-client/iface_up.sh b/developer/tunnel-client/iface_up.sh new file mode 100755 index 0000000..e5dbd0a --- /dev/null +++ b/developer/tunnel-client/iface_up.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +# iface_up.sh — enable/start wg-quick@ +set -euo pipefail + +(( $# == 1 )) || { echo "Usage: $0 "; exit 2; } +IFACE="$1" + +# Require root because systemd + net ops +if [[ $EUID -ne 0 ]]; then + echo "❌ This script must be run as root." >&2 + exit 1 +fi + +# Sanity: config must exist +[[ -r "/etc/wireguard/${IFACE}.conf" ]] || { + echo "❌ Missing: /etc/wireguard/${IFACE}.conf"; exit 1; } + +# Bring it up +systemctl enable --now "wg-quick@${IFACE}" + +# Quick confirmation +systemctl is-active --quiet "wg-quick@${IFACE}" \ + && echo "✅ ${IFACE} is active." \ + || { echo "⚠️ ${IFACE} failed to start."; exit 1; } diff --git a/developer/tunnel-client/incommon.py b/developer/tunnel-client/incommon.py new file mode 100644 index 0000000..a67a0aa --- /dev/null +++ b/developer/tunnel-client/incommon.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +# Shared helpers (DB path + small SQLite utilities). No side effects on import. 
import sqlite3
from pathlib import Path
from typing import Any, Iterable, List, Optional, Sequence, Tuple

# Base paths
ROOT_DIR: Path = Path(__file__).resolve().parent  # directory holding this module
DB_PATH: Path = ROOT_DIR / "db" / "store"         # default SQLite database location

def open_db(path: Optional[Path] = None) -> sqlite3.Connection:
    """Open the SQLite store (defaults to DB_PATH) with foreign keys enforced.

    Raises FileNotFoundError when the database file does not exist.
    """
    target = DB_PATH if path is None else path
    if not target.exists():
        raise FileNotFoundError(f"DB not found: {target}")
    conn = sqlite3.connect(target.as_posix())
    # FK enforcement is per-connection in SQLite; the journal mode is set by
    # the schema, so only foreign keys need switching on here.
    conn.execute("PRAGMA foreign_keys = ON;")
    return conn

def rows(conn: sqlite3.Connection, sql: str, params: Sequence[Any] = ()) -> List[tuple]:
    """Execute *sql* with *params* and return every result row."""
    cur = conn.execute(sql, tuple(params))
    try:
        return cur.fetchall()
    finally:
        cur.close()

def get_client_id(conn: sqlite3.Connection, iface: str) -> int:
    """Primary key of the Iface row named *iface*; raise when absent."""
    hit = conn.execute("SELECT id FROM Iface WHERE iface=? LIMIT 1;", (iface,)).fetchone()
    if hit is None:
        raise RuntimeError(f"client iface not found: {iface}")
    return int(hit[0])

# Tx helpers (optional but nice)

def begin_immediate(conn: sqlite3.Connection) -> None:
    """Start an IMMEDIATE transaction (takes the write lock up front)."""
    conn.execute("BEGIN IMMEDIATE;")

def commit(conn: sqlite3.Connection) -> None:
    """Commit the current transaction."""
    conn.commit()
sudo -n true 2>/dev/null; then + sudo echo -n +fi + +sudo python3 "${SCRIPT_DIR}/inspect_1.py" "$@" diff --git a/developer/tunnel-client/inspect_1.py b/developer/tunnel-client/inspect_1.py new file mode 100755 index 0000000..e6a179a --- /dev/null +++ b/developer/tunnel-client/inspect_1.py @@ -0,0 +1,362 @@ +#!/usr/bin/env python3 +# inspect.py — deep health: DB + systemd/drop-in + wg + route + uid rules + DNS plug + +from __future__ import annotations +import os, sys, re, time, shutil, sqlite3, subprocess +from pathlib import Path +from typing import List, Tuple, Optional +import incommon as ic # open_db() + +# ---------- small shell helpers ---------- + +def sh(args: List[str]) -> subprocess.CompletedProcess: + """Run command; never raise; text mode; capture stdout/stderr.""" + return subprocess.run(args, text=True, capture_output=True) + +def which(cmd: str) -> bool: + return shutil.which(cmd) is not None + +def print_block(title: str, body: str | None = None) -> None: + print(f"=== {title} ===") + if body: print(body.rstrip()) + print() + +def format_table(headers: List[str], rows: List[Tuple]) -> str: + cols = list(zip(*([headers] + [[str(c) for c in r] for r in rows]))) if rows else [headers] + widths = [max(len(x) for x in col) for col in cols] + line = lambda r: " ".join(f"{str(c):<{w}}" for c, w in zip(r, widths)) + out = [line(headers), line(tuple("-"*w for w in widths))] + for r in rows: out.append(line(tuple("" if c is None else str(c) for c in r))) + return "\n".join(out) + +# ---------- DB helpers ---------- + +def client_row(conn: sqlite3.Connection, iface: str): + return conn.execute(""" + SELECT c.iface, + v.rt_table_name_eff AS rt_table_name, + c.bound_user, c.bound_uid, + c.local_address_cidr, + substr(c.public_key,1,10)||'…' AS pub, + c.autostart, c.updated_at + FROM Iface c + JOIN v_client_effective v ON v.id=c.id + WHERE c.iface=? 
LIMIT 1; + """,(iface,)).fetchone() + +def server_rows(conn: sqlite3.Connection, iface: str) -> List[tuple]: + return conn.execute(""" + SELECT s.name, + s.endpoint_host || ':' || s.endpoint_port AS endpoint, + substr(s.public_key,1,10)||'…' AS pub, + s.allowed_ips, s.keepalive_s, s.priority + FROM server s + JOIN Iface c ON c.id=s.iface_id + WHERE c.iface=? + ORDER BY s.priority, s.name; + """,(iface,)).fetchall() + +def rtname_and_cidr(conn: sqlite3.Connection, iface: str) -> Tuple[str, str]: + row = conn.execute("SELECT rt_table_name_eff, local_address_cidr FROM v_client_effective WHERE iface=? LIMIT 1;",(iface,)).fetchone() + if not row: raise RuntimeError(f"Interface not found in DB: {iface}") + return str(row[0]), str(row[1]) + +def bound_uids(conn: sqlite3.Connection, iface: str) -> List[int]: + rows = conn.execute(""" + SELECT ub.uid + FROM User ub + JOIN Iface c ON c.id=ub.iface_id + WHERE c.iface=? AND ub.uid IS NOT NULL AND ub.uid!='' + ORDER BY ub.uid; + """,(iface,)).fetchall() + return [int(r[0]) for r in rows] + +def legacy_bound_uid(conn: sqlite3.Connection, iface: str) -> Optional[int]: + r = conn.execute("SELECT bound_uid FROM Iface WHERE iface=? AND bound_uid IS NOT NULL AND bound_uid!='';",(iface,)).fetchone() + return (int(r[0]) if r and r[0] is not None and str(r[0])!="" else None) + +def primary_server_ep_and_allowed(conn: sqlite3.Connection, iface: str) -> Tuple[str,str]: + ep = conn.execute(""" + SELECT s.endpoint_host||':'||s.endpoint_port + FROM server s JOIN Iface c ON c.id=s.iface_id + WHERE c.iface=? ORDER BY s.priority, s.name LIMIT 1; + """,(iface,)).fetchone() + allow = conn.execute(""" + SELECT s.allowed_ips + FROM server s JOIN Iface c ON c.id=s.iface_id + WHERE c.iface=? 
ORDER BY s.priority, s.name LIMIT 1; + """,(iface,)).fetchone() + return (str(ep[0]) if ep and ep[0] else ""), (str(allow[0]) if allow and allow[0] else "") + +# ---------- file checks ---------- + +def check_file(path: str, mode_oct: int, user: str, group: str) -> str: + p = Path(path) + if not p.exists(): return f"WARN: missing {path}" + try: + st = p.stat() + actual_mode = st.st_mode & 0o777 + import pwd, grp + u = pwd.getpwuid(st.st_uid).pw_name + g = grp.getgrgid(st.st_gid).gr_name + want = f"{oct(mode_oct)[2:]} {user} {group}" + got = f"{oct(actual_mode)[2:]} {u} {g}" + if actual_mode==mode_oct and u==user and g==group: + return f"OK: {path} ({got})" + else: + return f"WARN: {path} perms/owner {got} (expected {want})" + except Exception as e: + return f"WARN: {path} stat error: {e}" + +def rt_tables_has(table: str) -> bool: + try: + txt = Path("/etc/iproute2/rt_tables").read_text() + except Exception: + return False + pat = re.compile(rf"^\s*\d+\s+{re.escape(table)}\s*$", re.M) + return pat.search(txt) is not None + +# ---------- wg helpers ---------- + +def wg_present(iface: str) -> bool: + return Path(f"/sys/class/net/{iface}").exists() + +def wg_handshake_age_sec(iface: str) -> Optional[int]: + cp = sh(["sudo","-n","wg","show",iface,"latest-handshakes"]) + if cp.returncode != 0 or not cp.stdout.strip(): return None + try: + epoch = int(cp.stdout.split()[-1]) + if epoch<=0: return None + return int(time.time()) - epoch + except Exception: + return None + +def wg_endpoints_joined(iface: str) -> str: + cp = sh(["sudo","-n","wg","show",iface,"endpoints"]) + if cp.returncode != 0: return "" + vals = [] + for line in cp.stdout.splitlines(): + parts = line.split() + if len(parts)>=2: vals.append(parts[1]) + return "".join(vals) + +def wg_allowedips_csv(iface: str) -> str: + cp = sh(["sudo","-n","wg","show",iface,"allowed-ips"]) + if cp.returncode != 0: return "" + vals=[] + for line in cp.stdout.splitlines(): + parts = line.split() + if len(parts)>=2: 
vals.append(parts[1]) + return ",".join(vals) + +# ---------- redact helpers ---------- + +def redact_conf(text: str) -> str: + text = re.sub(r"^(PrivateKey\s*=\s*).+$", r"\1", text, flags=re.M) + text = re.sub(r"^(PresharedKey\s*=\s*).+$", r"\1", text, flags=re.M) + return text + +def sudo_cat(path: str) -> Optional[str]: + cp = sh(["sudo","-n","cat", path]) + if cp.returncode != 0: return None + return cp.stdout + +# ---------- main inspect ---------- + +def inspect_iface(iface: str) -> int: + # DB open + with ic.open_db() as conn: + crow = client_row(conn, iface) + if not crow: + print(f"❌ client row not found for iface={iface}", file=sys.stderr); return 1 + srv_rows = server_rows(conn, iface) + rtname, local_cidr = rtname_and_cidr(conn, iface) + local_ip = local_cidr.split("/",1)[0] + db_ep, db_allowed = primary_server_ep_and_allowed(conn, iface) + uids = bound_uids(conn, iface) + leg = legacy_bound_uid(conn, iface) + if leg is not None: uids.append(leg) + + # DB snapshot + print("=== DB: client '{}' ===".format(iface)) + headers = ["iface","rt_table_name","bound_user","bound_uid","local_address_cidr","pub","autostart","updated_at"] + print(format_table(headers, [crow])) + print() + print(f"--- server for '{iface}' ---") + if srv_rows: + print(format_table(["name","endpoint","pub","allowed_ips","keepalive_s","priority"], srv_rows)) + else: + print("(none)") + print() + + # systemd + drop-in + print(f"=== systemd: wg-quick@{iface} ===") + if which("systemctl"): + en = sh(["systemctl","is-enabled",f"wg-quick@{iface}"]).stdout.strip() + ac = sh(["systemctl","is-active", f"wg-quick@{iface}"]).stdout.strip() + if en: print(en) + if ac: print(ac) + drop_dir = f"/etc/systemd/system/wg-quick@{iface}.service.d" + # common filenames: legacy 'restart.conf' or new '10-postup-IP-scripts.conf' + candidates = [f"{drop_dir}/restart.conf", f"{drop_dir}/10-postup-IP-scripts.conf"] + print(f"-- drop-in expected: {candidates[0]}") + found = [p for p in candidates if 
Path(p).is_file()] + if found: + print("OK: drop-in file exists") + else: + print("WARN: drop-in file missing or unreadable") + dpaths = sh(["systemctl","show",f"wg-quick@{iface}","-p","DropInPaths","--value"]).stdout.strip() + if dpaths and any(p in dpaths for p in candidates): + print("OK: drop-in is loaded by systemd") + else: + print("WARN: drop-in not reported by systemd (need daemon-reload?)") + else: + print("(systemctl not available)") + print() + + # installed targets + print("=== installed targets ===") + print(check_file(f"/etc/wireguard/{iface}.conf", 0o600, "root", "root")) + # check both possible drop-in names + d1 = check_file(f"/etc/systemd/system/wg-quick@{iface}.service.d/restart.conf", 0o644, "root", "root") + d2 = check_file(f"/etc/systemd/system/wg-quick@{iface}.service.d/10-postup-IP-scripts.conf", 0o644, "root", "root") + # show OK if either exists + if d1.startswith("OK") or d2.startswith("OK"): + print(d1 if d1.startswith("OK") else d2) + else: + # print both warnings for clarity + print(d1); print(d2) + print(check_file("/usr/local/bin/IP_rule_add_UID.sh", 0o500, "root", "root")) + print(check_file(f"/usr/local/bin/route_init_{iface}.sh", 0o500, "root", "root")) + print("OK: rt_tables entry for '{}' present".format(rtname) if rt_tables_has(rtname) + else f"WARN: rt_tables entry for '{rtname}' missing") + print() + + # wg + addr + print(f"=== wg + addr: {iface} ===") + present = wg_present(iface) + print("(present)" if present else "(interface down or not present)") + if present: + has_ip = sh(["ip","-4","addr","show","dev",iface]).stdout.find(f" {local_ip}/")>=0 + print(f"OK: {iface} has {local_ip}" if has_ip else f"WARN: {iface} missing {local_ip}") + if which("wg"): + age = wg_handshake_age_sec(iface) + if age is None: + print("latest-handshake: none") + else: + print(f"latest-handshake: {age}s ago") + if age>600: print("WARN: handshake is stale (>600s)") + # endpoint and allowed-ips comparison (requires sudo) + wg_ep = 
wg_endpoints_joined(iface) + if db_ep: + if wg_ep == db_ep: + print(f"OK: endpoint matches DB ({wg_ep})") + else: + print(f"WARN: endpoint mismatch (wg={wg_ep or 'n/a'} db={db_ep})") + wg_allowed = wg_allowedips_csv(iface) + if db_allowed: + if wg_allowed == db_allowed: + print(f"OK: allowed-ips match DB ({wg_allowed})") + else: + print(f"WARN: allowed-ips mismatch (wg={wg_allowed or 'n/a'} db={db_allowed})") + else: + prog = Path(sys.argv[0]).name + print(f"⚠ need sudo for handshake/peer checks (try: sudo {prog} {iface})") + print() + + # route table checks + print(f"=== route: table {rtname} ===") + rt = sh(["ip","-4","route","show","table",rtname]).stdout + print(rt or "") + def_ok = any(re.match(rf"^default\s+dev\s+{re.escape(iface)}\b", ln) for ln in rt.splitlines()) + bh_ok = any(re.match(r"^blackhole\s+default\b", ln) for ln in rt.splitlines()) + print("OK: default -> {}".format(iface) if def_ok else f"WARN: default route not on {iface}") + print("OK: blackhole guard present" if bh_ok else "WARN: blackhole guard missing") + print() + + # uid rules + print(f"=== ip rules for bound UIDs → table {rtname} ===") + rules_txt = sh(["ip","-4","rule","show"]).stdout + if uids: + for u in uids: + if re.search(rf"uidrange {u}-{u}.*lookup {re.escape(rtname)}", rules_txt): + print(f"OK: uid {u} -> table {rtname}") + else: + print(f"WARN: missing rule for uid {u} -> table {rtname}") + else: + print("(no bound UIDs recorded)") + print() + print(f"=== ip rule lines targeting '{rtname}' (all) ===") + hit_lines = [ln for ln in rules_txt.splitlines() if f"lookup {rtname}" in ln] + print("\n".join(hit_lines) if hit_lines else "(none)") + print() + + # DNS leak plug: iptables redirects + print("=== iptables nat OUTPUT DNS redirect (→ 127.0.0.1:53) ===") + if which("iptables"): + nat = sh(["iptables","-t","nat","-S","OUTPUT"]).stdout + r_udp = re.search(r"-A OUTPUT.*-p udp .* --dport 53 .* REDIRECT .*to-ports 53", nat or "") + r_tcp = re.search(r"-A OUTPUT.*-p tcp .* --dport 53 
.* REDIRECT .*to-ports 53", nat or "") + print(r_udp.group(0) if r_udp else "WARN: no UDP:53 redirect") + print(r_tcp.group(0) if r_tcp else "WARN: no TCP:53 redirect") + else: + print("(iptables not available)") + print() + + # on-disk configs (redacted) + conf = f"/etc/wireguard/{iface}.conf" + drop_restart = f"/etc/systemd/system/wg-quick@{iface}.service.d/restart.conf" + drop_postup = f"/etc/systemd/system/wg-quick@{iface}.service.d/10-postup-IP-scripts.conf" + + print(f"=== file: {conf} (redacted) ===") + txt = sudo_cat(conf) + if txt is None: + print("(missing or unreadable; need sudo to view)") + else: + print(redact_conf(txt)) + print() + + pick_drop = drop_restart if Path(drop_restart).exists() else drop_postup + print(f"=== file: {pick_drop} (hooks) ===") + txt = sudo_cat(pick_drop) + if txt is None: + print("(missing or unreadable; need sudo to view)") + else: + # Show only interesting service lines if present + lines = [ln for ln in txt.splitlines() + if ln.startswith(("ExecStart","Restart","RestartSec","ExecStartPre","ExecStartPost"))] + print("\n".join(lines) if lines else txt) + print() + + # summary verdict + print("=== summary ===") + ok = True + ok &= def_ok + ok &= bh_ok + if uids: + for u in uids: + if not re.search(rf"uidrange {u}-{u}.*lookup {re.escape(rtname)}", rules_txt): ok = False + ok &= rt_tables_has(rtname) + ok &= Path(f"/etc/wireguard/{iface}.conf").exists() + ok &= (Path(drop_restart).exists() or Path(drop_postup).exists()) + ok &= wg_present(iface) + if db_ep and which("wg"): + # If wg is present and sudo works, compare endpoint; otherwise skip + wg_ep = wg_endpoints_joined(iface) + if wg_ep and wg_ep != db_ep: ok = False + print("✅ Looks consistent for '{}'.".format(iface) if ok else "⚠️ Something is off — check WARN lines above.") + return 0 if ok else 1 + +# ---------- cli ---------- + +def main(argv: List[str]) -> int: + if len(argv)!=1: + print(f"Usage: {Path(sys.argv[0]).name} ", file=sys.stderr) + return 2 + try: + return 
# ----------------------------------------------------------------------------
# inspect_client_public_key.py — show the client's WireGuard public key for
# one iface.  Sources checked (in this order): DB, staged conf, installed
# conf, kernel.  The client public key is derived locally from the client's
# PrivateKey and must be copied to the **server** as the peer's PublicKey.
# ----------------------------------------------------------------------------
import argparse
import os
import subprocess
import sqlite3
import sys
from pathlib import Path
from typing import List, Optional, Tuple

try:
    import incommon as ic  # project-local: DB_PATH and open_db()
except ImportError:        # NOTE(review): allows standalone syntax/test runs
    ic = None

ROOT = Path(__file__).resolve().parent
DEFAULT_STAGE = ROOT / "stage"
LIVE_WG_DIR = Path("/etc/wireguard")

def _is_root() -> bool:
    """True when running with effective UID 0."""
    return os.geteuid() == 0

def _format_table(headers: List[str], rows: List[Tuple]) -> str:
    """Left-aligned text table; '(none)' for an empty row list."""
    if not rows:
        return "(none)"
    cols = list(zip(*([headers] + [[("" if c is None else str(c)) for c in r] for r in rows])))
    widths = [max(len(x) for x in col) for col in cols]

    def line(r):
        return "  ".join(f"{str(c):<{w}}" for c, w in zip(r, widths))

    rendered = [line(headers), line(tuple("-" * w for w in widths))]
    rendered.extend(line(r) for r in rows)
    return "\n".join(rendered)

def _read_conf_private_key(conf_path: Path) -> Optional[str]:
    """PrivateKey value from a wg conf's first [Interface] block, or None."""
    try:
        txt = conf_path.read_text()
    except FileNotFoundError:
        return None
    section = None
    for raw in txt.splitlines():
        line = raw.strip()
        if not line or line.startswith("#") or line.startswith(";"):
            continue  # blank lines and comments
        if line.startswith("[") and line.endswith("]"):
            section = line[1:-1].strip()
            continue
        if section == "Interface" and line.lower().startswith("privatekey"):
            parts = line.split("=", 1)
            if len(parts) == 2:
                val = parts[1].strip()
                return val if val else None
    return None

def _pub_from_private_key(priv: str) -> Optional[str]:
    """Derive the public key from a base64 private key via `wg pubkey`."""
    if not priv:
        return None
    try:
        cp = subprocess.run(
            ["wg", "pubkey"],
            input=(priv + "\n").encode("utf-8"),
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            check=True,
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        return None
    pub = cp.stdout.decode("utf-8", "replace").strip()
    return pub or None

def _kernel_iface_public_key(iface: str) -> Optional[str]:
    """Public key the kernel reports for *iface*, or None when unavailable."""
    try:
        cp = subprocess.run(
            ["wg", "show", iface, "public-key"],
            stdout=subprocess.PIPE,
            stderr=subprocess.DEVNULL,
            check=True,
        )
    except (subprocess.CalledProcessError, FileNotFoundError):
        return None
    key = cp.stdout.decode("utf-8", "replace").strip()
    return key or None

def _db_client_public_key(conn: sqlite3.Connection, iface: str) -> Optional[str]:
    """Iface.public_key stored in the DB, or None when absent/empty."""
    row = conn.execute("SELECT public_key FROM Iface WHERE iface=? LIMIT 1;", (iface,)).fetchone()
    if not row:
        return None
    key = row[0]
    return key if key else None

def _rel_from_stage(path: Path, stage_root: Path) -> str:
    """Short stage-relative display path when *path* is under *stage_root*."""
    try:
        return str(path.relative_to(stage_root))
    except ValueError:
        return str(path)

def _gather(iface: str, stage_root: Path) -> Tuple[List[Tuple[str, str, str]], List[str]]:
    """Collect (source, location, public_key-or-placeholder) rows plus notes."""
    notes: List[str] = []

    # DB
    db_pub: Optional[str] = None
    if ic.DB_PATH.exists():
        try:
            with ic.open_db() as conn:
                db_pub = _db_client_public_key(conn, iface)
        except sqlite3.Error as e:
            notes.append(f"DB error: {e}")
    else:
        notes.append(f"DB not found at {ic.DB_PATH}")

    # staged conf -> derive pub from PrivateKey
    staged_conf = stage_root / "etc" / "wireguard" / f"{iface}.conf"
    staged_priv = _read_conf_private_key(staged_conf)
    staged_pub = _pub_from_private_key(staged_priv) if staged_priv else None
    if staged_priv is None and staged_conf.exists():
        notes.append(f"staged conf present but PrivateKey missing: { _rel_from_stage(staged_conf, stage_root) }")

    # live conf -> derive pub from PrivateKey
    live_conf = LIVE_WG_DIR / f"{iface}.conf"
    live_priv = _read_conf_private_key(live_conf)
    live_pub = _pub_from_private_key(live_priv) if live_priv else None
    if live_conf.exists() and live_priv is None:
        notes.append(f"installed conf present but PrivateKey missing: {live_conf}")

    # kernel
    kern_pub = _kernel_iface_public_key(iface)

    rows: List[Tuple[str, str, str]] = [
        ("DB", f"Iface.public_key[{iface}]", db_pub or "(missing)"),
        ("Stage", _rel_from_stage(staged_conf, stage_root),
         staged_pub or ("(missing)" if not staged_conf.exists() else "(could not derive)")),
        ("Installed", str(live_conf),
         live_pub or ("(missing)" if not live_conf.exists() else "(could not derive)")),
        ("Kernel", f"wg show {iface} public-key", kern_pub or "(missing)"),
    ]

    # Quick consistency summary: placeholders all start with "("
    present = [v for _s, _loc, v in rows if not v.startswith("(")]
    if len(present) >= 2:
        if all(v == present[0] for v in present[1:]):
            notes.append("All present sources agree.")
        else:
            notes.append("Mismatch detected between sources.")
    elif len(present) == 1:
        notes.append("Only one source has a key (cannot check consistency).")
    else:
        notes.append("No source has a client public key.")

    return (rows, notes)

def inspect_client_public_key(iface: str, stage_root: Optional[Path] = None) -> str:
    """Business function: build and return the formatted report string."""
    sr = stage_root or DEFAULT_STAGE
    rows, notes = _gather(iface, sr)

    header = (
        f"Client public key inspection for iface '{iface}'\n"
        "This public key is generated locally from the client’s PrivateKey and must be\n"
        "installed on the *server* as the peer’s PublicKey in the server’s WireGuard config.\n"
    )
    table = _format_table(["source", "where", "public_key"], rows)
    note_block = "\nNotes:\n- " + "\n- ".join(notes) if notes else ""
    return f"{header}\n{table}\n{note_block}\n"

def main(argv: Optional[List[str]] = None) -> int:
    """CLI entry; aggregates invocation errors instead of failing one-by-one."""
    ap = argparse.ArgumentParser(
        description="Inspect the client’s WireGuard public key for a single interface."
    )
    # iface optional so errors can be aggregated below
    ap.add_argument("iface", nargs="?", help="interface name (e.g., x6)")
    ap.add_argument("--stage-root", default=str(DEFAULT_STAGE), help="stage directory (default: ./stage)")
    args = ap.parse_args(argv)

    errors: List[str] = []
    if not _is_root():
        errors.append("must run as root (needs access to /etc/wireguard and wg)")
    if not args.iface:
        errors.append("missing required positional argument: iface")
    if args.stage_root:
        sr = Path(args.stage_root)
        if not sr.exists():
            errors.append(f"--stage-root does not exist: {sr}")
        elif not sr.is_dir():
            errors.append(f"--stage-root is not a directory: {sr}")

    if errors:
        ap.print_usage(sys.stderr)
        print(f"{ap.prog}: error: " + "; ".join(errors), file=sys.stderr)
        return 2

    try:
        report = inspect_client_public_key(args.iface, Path(args.stage_root))
        print(report, end="")
        return 0
    except Exception as e:
        print(f"❌ {e}", file=sys.stderr)
        return 2
+ ) + # Make iface optional so we can aggregate errors ourselves + ap.add_argument("iface", nargs="?", help="interface name (e.g., x6)") + ap.add_argument("--stage-root", default=str(DEFAULT_STAGE), help="stage directory (default: ./stage)") + args = ap.parse_args(argv) + + # Aggregate invocation errors + errors: List[str] = [] + if not _is_root(): + errors.append("must run as root (needs access to /etc/wireguard and wg)") + if not args.iface: + errors.append("missing required positional argument: iface") + if args.stage_root: + sr = Path(args.stage_root) + if not sr.exists(): + errors.append(f"--stage-root does not exist: {sr}") + elif not sr.is_dir(): + errors.append(f"--stage-root is not a directory: {sr}") + + if errors: + ap.print_usage(sys.stderr) + print(f"{ap.prog}: error: " + "; ".join(errors), file=sys.stderr) + return 2 + + try: + report = inspect_client_public_key(args.iface, Path(args.stage_root)) + print(report, end="") + return 0 + except Exception as e: + print(f"❌ {e}", file=sys.stderr) + return 2 + +if __name__ == "__main__": + sys.exit(main()) diff --git a/developer/tunnel-client/install_staged_tree.py b/developer/tunnel-client/install_staged_tree.py new file mode 100755 index 0000000..e1225d5 --- /dev/null +++ b/developer/tunnel-client/install_staged_tree.py @@ -0,0 +1,245 @@ +#!/usr/bin/env python3 +""" +install_staged_tree.py + +A dumb installer: copy staged files into the target root with backups and +deterministic permissions. No systemd stop/start, no daemon-reload. + +Given: + - A staged tree (default: ./stage) containing any of: + /usr/local/bin/apply_ip_state.sh + /etc/wireguard/*.conf + /etc/systemd/system/wg-quick@IFACE.service.d/*.conf + /etc/iproute2/rt_tables + - A destination root (default: /). Parent dirs may be created with --create-dirs. 
import argparse
import datetime as dt
import hashlib
import os
import shutil
import sys
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Sequence, Tuple

ROOT = Path(__file__).resolve().parent
DEFAULT_STAGE = ROOT / "stage"

# Whitelisted install buckets → mode (documentation of policy; the actual
# decision is made by _mode_for_rel).  Paths are *relative* to the stage root.
MODE_RULES: List[Tuple[str, int]] = [
    ("usr/local/bin", 0o500),        # files under here (scripts)
    ("etc/wireguard", 0o600),        # *.conf
    ("etc/systemd/system", 0o644),   # wg-quick@*.service.d/*.conf
    ("etc/iproute2", 0o644),         # rt_tables
]

def _sha256(path: Path) -> str:
    """Streaming SHA-256 hex digest of a file (1 MiB chunks)."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

def _ensure_parents(dest_root: Path, rel: Path, create: bool) -> None:
    """Ensure the target's parent directory exists; create it only when allowed."""
    parent = (dest_root / rel).parent
    if parent.exists():
        return
    if not create:
        raise RuntimeError(f"missing parent directory: {parent}")
    parent.mkdir(parents=True, exist_ok=True)

def _backup_existing_to_stage(stage_root: Path, dest_root: Path, rel: Path) -> Optional[Path]:
    """If the target exists, copy it into stage/_backups/<ts>/<rel>; return the backup path."""
    target = dest_root / rel
    if not target.exists():
        return None
    # FIX: datetime.utcnow() is deprecated (Python 3.12); use an aware UTC
    # datetime.  The formatted timestamp ("...Z") is identical.
    ts = dt.datetime.now(dt.timezone.utc).strftime("%Y%m%dT%H%M%SZ")
    backup = stage_root / "_backups" / ts / rel
    backup.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy2(target, backup)
    return backup

def _atomic_install(src: Path, dst: Path, mode: int) -> None:
    """Copy src to a temp file next to dst, fix mode/owner, then atomically replace."""
    tmp = dst.with_suffix(dst.suffix + ".tmp")
    shutil.copyfile(src, tmp)
    os.chmod(tmp, mode)
    try:
        os.chown(tmp, 0, 0)  # best-effort; may fail if not root
    except PermissionError:
        pass
    os.replace(tmp, dst)

def _mode_for_rel(rel: Path) -> Optional[int]:
    """Choose a permission mode based on the relative-path bucket; None = not whitelisted."""
    s = str(rel)
    if s.startswith("usr/local/bin/"):
        return 0o500
    if s.startswith("etc/wireguard/") and rel.suffix == ".conf":
        return 0o600
    if s == "etc/iproute2/rt_tables":
        return 0o644
    if s.startswith("etc/systemd/system/") and s.endswith(".conf"):
        return 0o644
    return None

def _iter_stage_targets(stage_root: Path) -> List[Path]:
    """Return the *relative* staged paths that match the whitelist, sorted per bucket."""
    rels: List[Path] = []

    # /usr/local/bin/*
    bin_dir = stage_root / "usr" / "local" / "bin"
    if bin_dir.is_dir():
        for p in sorted(bin_dir.glob("*")):
            if p.is_file():
                rels.append(p.relative_to(stage_root))

    # /etc/wireguard/*.conf
    wg_dir = stage_root / "etc" / "wireguard"
    if wg_dir.is_dir():
        for p in sorted(wg_dir.glob("*.conf")):
            rels.append(p.relative_to(stage_root))

    # /etc/systemd/system/wg-quick@*.service.d/*.conf
    sysd_dir = stage_root / "etc" / "systemd" / "system"
    if sysd_dir.is_dir():
        for p in sorted(sysd_dir.rglob("wg-quick@*.service.d/*.conf")):
            rels.append(p.relative_to(stage_root))

    # /etc/iproute2/rt_tables
    rt = stage_root / "etc" / "iproute2" / "rt_tables"
    if rt.is_file():
        rels.append(rt.relative_to(stage_root))

    return rels

def _discover_ifaces_from_stage(stage_root: Path) -> List[str]:
    """Peek into staged artifacts to guess iface names (for friendly next-steps)."""
    names = set()

    # from /etc/wireguard/<iface>.conf
    wg_dir = stage_root / "etc" / "wireguard"
    if wg_dir.is_dir():
        for p in wg_dir.glob("*.conf"):
            names.add(p.stem)

    # from /etc/systemd/system/wg-quick@<iface>.service.d/
    sysd = stage_root / "etc" / "systemd" / "system"
    if sysd.is_dir():
        for d in sysd.glob("wg-quick@*.service.d"):
            name = d.name  # looks like: wg-quick@X.service.d
            at = name.find("@")
            dot = name.find(".service.d")
            if at != -1 and dot != -1 and dot > at:
                names.add(name[at + 1:dot])

    return sorted(names)

def install_staged_tree(
    stage_root: Path,
    dest_root: Path,
    create_dirs: bool = False,
    skip_identical: bool = True,
) -> Tuple[List[str], List[str]]:
    """Copy whitelisted files from *stage_root* into *dest_root*.

    Backs existing targets up into the stage, installs atomically with
    deterministic modes, and returns (log lines, detected iface names).
    Raises RuntimeError when the stage has nothing installable.
    """
    old_umask = os.umask(0o077)  # keep temp files private during install
    logs: List[str] = []
    try:
        staged = _iter_stage_targets(stage_root)
        if not staged:
            raise RuntimeError("nothing to install (stage is empty or whitelist didn’t match)")

        for rel in staged:
            src = stage_root / rel
            dst = dest_root / rel

            mode = _mode_for_rel(rel)
            if mode is None:
                logs.append(f"skip (not whitelisted): {rel}")
                continue

            _ensure_parents(dest_root, rel, create_dirs)

            backup = _backup_existing_to_stage(stage_root, dest_root, rel)
            if backup:
                logs.append(f"backup: {dst} -> {backup}")

            if skip_identical and dst.exists():
                try:
                    if _sha256(src) == _sha256(dst):
                        logs.append(f"identical: skip {rel}")
                        continue
                except Exception:
                    pass  # best-effort compare; fall through to install

            _atomic_install(src, dst, mode)
            logs.append(f"install: {rel} (mode {oct(mode)})")

        ifaces = _discover_ifaces_from_stage(stage_root)
        return (logs, ifaces)
    finally:
        os.umask(old_umask)

def _require_root(allow_nonroot: bool) -> None:
    """Raise unless running as root (or explicitly overridden)."""
    if not allow_nonroot and os.geteuid() != 0:
        raise RuntimeError("must run as root (use --force-nonroot to override)")

def main(argv: Optional[Sequence[str]] = None) -> int:
    """CLI entry: parse options, install the stage, print a summary."""
    ap = argparse.ArgumentParser(description="Install staged artifacts into a target root. No service control.")
    ap.add_argument("--stage", default=str(DEFAULT_STAGE))
    ap.add_argument("--root", default="/")
    ap.add_argument("--create-dirs", action="store_true", help="create missing parent directories")
    ap.add_argument("--no-skip-identical", action="store_true", help="always replace even if content identical")
    ap.add_argument("--force-nonroot", action="store_true", help="allow non-root install (ownership may be wrong)")
    args = ap.parse_args(argv)

    try:
        _require_root(allow_nonroot=args.force_nonroot)
        logs, ifaces = install_staged_tree(
            stage_root=Path(args.stage),
            dest_root=Path(args.root),
            create_dirs=args.create_dirs,
            skip_identical=(not args.no_skip_identical),
        )
        for line in logs:
            print(line)

        # Summary + suggested next steps
        print("\n=== Summary ===")
        print(f"Installed {sum(1 for l in logs if l.startswith('install:'))} file(s).")
        if ifaces:
            lst = " ".join(ifaces)
            print(f"Detected interfaces from stage: {lst}")
            print(f"\nNext steps:")
            print(f"  # (optional) verify configs")
            print(f"  sudo wg-quick strip /etc/wireguard/{ifaces[0]}.conf >/dev/null 2>&1 || true")
            print(f"\n  # start interfaces")
            print(f"  sudo ./start_iface.py {lst}")
        else:
            print("No interfaces detected in staged artifacts.")
            print("\nNext steps:")
            print("  # start your interface(s)")
            print("  sudo ./start_iface.py [more ifaces]")
        return 0
    except Exception as e:
        print(f"❌ install failed: {e}", file=sys.stderr)
        return 2
No service control.") + ap.add_argument("--stage", default=str(DEFAULT_STAGE)) + ap.add_argument("--root", default="/") + ap.add_argument("--create-dirs", action="store_true", help="create missing parent directories") + ap.add_argument("--no-skip-identical", action="store_true", help="always replace even if content identical") + ap.add_argument("--force-nonroot", action="store_true", help="allow non-root install (ownership may be wrong)") + args = ap.parse_args(argv) + + try: + _require_root(allow_nonroot=args.force_nonroot) + logs, ifaces = install_staged_tree( + stage_root=Path(args.stage), + dest_root=Path(args.root), + create_dirs=args.create_dirs, + skip_identical=(not args.no_skip_identical), + ) + for line in logs: + print(line) + + # Summary + suggested next steps + print("\n=== Summary ===") + print(f"Installed {sum(1 for l in logs if l.startswith('install:'))} file(s).") + if ifaces: + lst = " ".join(ifaces) + print(f"Detected interfaces from stage: {lst}") + print(f"\nNext steps:") + print(f" # (optional) verify configs") + print(f" sudo wg-quick strip /etc/wireguard/{ifaces[0]}.conf >/dev/null 2>&1 || true") + print(f"\n # start interfaces") + print(f" sudo ./start_iface.py {lst}") + else: + print("No interfaces detected in staged artifacts.") + print("\nNext steps:") + print(" # start your interface(s)") + print(" sudo ./start_iface.py [more ifaces]") + return 0 + except Exception as e: + print(f"❌ install failed: {e}", file=sys.stderr) + return 2 + +if __name__ == "__main__": + sys.exit(main()) diff --git a/developer/tunnel-client/key/.gitignore b/developer/tunnel-client/key/.gitignore new file mode 100644 index 0000000..53642ce --- /dev/null +++ b/developer/tunnel-client/key/.gitignore @@ -0,0 +1,4 @@ + +* +!.gitignore + diff --git a/developer/tunnel-client/key_client_generate.py b/developer/tunnel-client/key_client_generate.py new file mode 100755 index 0000000..96df023 --- /dev/null +++ b/developer/tunnel-client/key_client_generate.py @@ -0,0 +1,63 
@@ +#!/usr/bin/env python3 +# key_client_generate.py — generate a machine-wide WG keypair +# Usage: ./key_client_generate.py +# - Writes private key to: key/ +# - Updates ALL client.public_key in local DB (no private key stored in DB) + +from __future__ import annotations +import sys, shutil, subprocess, sqlite3, os +from pathlib import Path +import incommon as ic # ROOT_DIR, DB_PATH, open_db() + +def generate_keypair() -> tuple[str, str]: + if not shutil.which("wg"): + raise RuntimeError("wg not found; install wireguard-tools") + priv = subprocess.run(["wg","genkey"], check=True, text=True, capture_output=True).stdout.strip() + pub = subprocess.run(["wg","pubkey"], check=True, input=priv.encode(), capture_output=True).stdout.decode().strip() + # quick sanity + if not (43 <= len(pub) <= 45): + raise RuntimeError(f"generated public key length looks wrong ({len(pub)})") + return priv, pub + +def write_private_key(machine: str, private_key: str) -> Path: + key_dir = ic.ROOT_DIR / "key" + key_dir.mkdir(parents=True, exist_ok=True) + out_path = key_dir / machine + if out_path.exists(): + raise FileExistsError(f"refusing to overwrite existing private key file: {out_path}") + with open(out_path, "w", encoding="utf-8") as f: + f.write(private_key + "\n") + os.chmod(out_path, 0o600) + return out_path + +def update_client_public_keys(pub: str) -> int: + if not ic.DB_PATH.exists(): + raise FileNotFoundError(f"DB not found: {ic.DB_PATH}") + with ic.open_db() as conn: + cur = conn.execute( + "UPDATE Iface " + " SET public_key=?, updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now');", + (pub,) + ) + conn.commit() + return cur.rowcount or 0 + +def main(argv: list[str]) -> int: + if len(argv) != 1: + print(f"Usage: {Path(sys.argv[0]).name} ", file=sys.stderr) + return 2 + machine = argv[0] + try: + priv, pub = generate_keypair() + out_path = write_private_key(machine, priv) + n = update_client_public_keys(pub) + print(f"wrote: {out_path.relative_to(ic.ROOT_DIR)} (600)") + 
print(f"updated client.public_key for {n} row(s)") + print(f"public_key: {pub}") + return 0 + except (RuntimeError, FileExistsError, FileNotFoundError, sqlite3.Error, subprocess.CalledProcessError) as e: + print(f"❌ {e}", file=sys.stderr) + return 1 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/developer/tunnel-client/key_server_set.py b/developer/tunnel-client/key_server_set.py new file mode 100755 index 0000000..f53022e --- /dev/null +++ b/developer/tunnel-client/key_server_set.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +# key_server_set.py — set a server's public key by nickname +# Usage: ./key_server_set.py + +from __future__ import annotations +import sys, sqlite3 +from pathlib import Path +import incommon as ic # DB_PATH, open_db() + +def valid_pub(pub: str) -> bool: + # wg public keys are base64-like and typically 44 chars; allow 43–45 as used elsewhere + return isinstance(pub, str) and (43 <= len(pub.strip()) <= 45) + +def set_server_pubkey(server_name: str, pubkey: str) -> int: + if not ic.DB_PATH.exists(): + raise FileNotFoundError(f"DB not found: {ic.DB_PATH}") + with ic.open_db() as conn: + cur = conn.execute( + "UPDATE server " + " SET public_key=?, updated_at=strftime('%Y-%m-%dT%H:%M:%SZ','now') " + " WHERE name=?;", + (pubkey.strip(), server_name) + ) + conn.commit() + return cur.rowcount or 0 + +def main(argv: list[str]) -> int: + if len(argv) != 2: + print(f"Usage: {Path(sys.argv[0]).name} ", file=sys.stderr) + return 2 + name, pub = argv + if not valid_pub(pub): + print(f"❌ public_key length looks wrong ({len(pub)})", file=sys.stderr) + return 1 + try: + n = set_server_pubkey(name, pub) + if n == 0: + print(f"⚠️ no matching server rows for name='{name}'") + else: + print(f"updated server.public_key for {n} row(s) where name='{name}'") + return 0 + except (sqlite3.Error, FileNotFoundError) as e: + print(f"❌ {e}", file=sys.stderr) + return 1 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git 
a/developer/tunnel-client/ls_iface.py b/developer/tunnel-client/ls_iface.py new file mode 100755 index 0000000..e9454f0 --- /dev/null +++ b/developer/tunnel-client/ls_iface.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python3 +""" +ls_client.py — list client from the DB + +Default output: interface names, one per line. + +Options: + -i, --iface IFACE Filter to a single interface (exact match) + -l, --long Show a table with iface, rt_table_name, rt_table_id, addr, autostart, updated_at + -h, --help Show usage +""" + +from __future__ import annotations +import sys +import argparse +import sqlite3 +from typing import List, Tuple +import incommon as ic # DB_PATH, open_db() + +def parse_args(argv: List[str]) -> argparse.Namespace: + ap = argparse.ArgumentParser(add_help=False, prog="ls_client.py", description="List client from the DB") + ap.add_argument("-i","--iface", help="Filter by interface (exact match)") + ap.add_argument("-l","--long", action="store_true", help="Long table output") + ap.add_argument("-h","--help", action="help", help="Show this help and exit") + return ap.parse_args(argv) + +def fmt_table(headers: List[str], rows: List[Tuple]) -> str: + if not rows: return "" + # normalize to strings; keep empty for None + rows = [[("" if c is None else str(c)) for c in r] for r in rows] + cols = list(zip(*([headers] + rows))) + widths = [max(len(x) for x in col) for col in cols] + line = lambda r: " ".join(f"{str(c):<{w}}" for c, w in zip(r, widths)) + out = [line(headers), line(tuple("-"*w for w in widths))] + out += [line(r) for r in rows] + return "\n".join(out) + +def list_names(conn: sqlite3.Connection, iface: str | None) -> int: + if iface: + rows = conn.execute("SELECT iface FROM Iface WHERE iface=? 
ORDER BY iface;", (iface,)).fetchall() + else: + rows = conn.execute("SELECT iface FROM Iface ORDER BY iface;").fetchall() + for (name,) in rows: + print(name) + return 0 + +def list_long(conn: sqlite3.Connection, iface: str | None) -> int: + if iface: + rows = conn.execute(""" + SELECT c.iface, + v.rt_table_name_eff AS rt_table_name, + COALESCE(c.rt_table_id,'') AS rt_table_id, + c.local_address_cidr, + c.autostart, + c.updated_at + FROM Iface c + JOIN v_client_effective v ON v.id = c.id + WHERE c.iface = ? + ORDER BY c.iface; + """, (iface,)).fetchall() + else: + rows = conn.execute(""" + SELECT c.iface, + v.rt_table_name_eff AS rt_table_name, + COALESCE(c.rt_table_id,'') AS rt_table_id, + c.local_address_cidr, + c.autostart, + c.updated_at + FROM Iface c + JOIN v_client_effective v ON v.id = c.id + ORDER BY c.iface; + """).fetchall() + + hdr = ["iface","rt_table_name","rt_table_id","addr","autostart","updated_at"] + txt = fmt_table(hdr, rows) + if txt: print(txt) + return 0 + +def main(argv: List[str]) -> int: + args = parse_args(argv) + try: + with ic.open_db() as conn: + return list_long(conn, args.iface) if args.long else list_names(conn, args.iface) + except (sqlite3.Error, FileNotFoundError) as e: + print(f"❌ {e}", file=sys.stderr) + return 2 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/developer/tunnel-client/ls_key.py b/developer/tunnel-client/ls_key.py new file mode 100755 index 0000000..535c7c9 --- /dev/null +++ b/developer/tunnel-client/ls_key.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 +# ls_keys.py — list WireGuard public keys only +# Usage: +# ./ls_keys.py # all client/server +# ./ls_keys.py -i x6 # only iface x6 + +from __future__ import annotations +import sys, argparse, sqlite3 +from pathlib import Path +from typing import List, Tuple +import incommon as ic # DB_PATH, open_db() + +def format_table(headers: List[str], rows: List[Tuple]) -> str: + if not rows: + return "(none)" + cols = list(zip(*([headers] + [[("" if c 
is None else str(c)) for c in r] for r in rows]))) + widths = [max(len(x) for x in col) for col in cols] + def line(r): return " ".join(f"{str(c):<{w}}" for c, w in zip(r, widths)) + out = [line(headers), line(tuple("-"*w for w in widths))] + for r in rows: out.append(line(r)) + return "\n".join(out) + +def list_client_keys(conn: sqlite3.Connection, iface: str | None, banner=False) -> str: + if banner: + print("\n=== Public keys generated locally by client, probably by using `key_client_generate.py`===") + rows = conn.execute( + "SELECT iface, public_key AS client_public_key " + "FROM Iface " + + ("WHERE iface=? " if iface else "") + + "ORDER BY iface;", + ((iface,) if iface else tuple()), + ).fetchall() + return format_table(["iface","client_public_key"], rows) + +def list_server_keys(conn: sqlite3.Connection, iface: str | None ,banner=False) -> str: + if banner: + print("\n=== Public keys imported from remote server, probably edited into db_init_server_.py ===") + rows = conn.execute( + "SELECT c.iface AS client, s.name AS server, s.public_key AS server_public_key " + "FROM server s JOIN Iface c ON c.id = s.iface_id " + + ("WHERE c.iface=? " if iface else "") + + "ORDER BY c.iface, s.name;", + ((iface,) if iface else tuple()), + ).fetchall() + return format_table(["client","server","server_public_key"], rows) + +def client_pub_for_iface(conn: sqlite3.Connection, iface: str) -> str | None: + r = conn.execute("SELECT public_key FROM Iface WHERE iface=? 
LIMIT 1;", (iface,)).fetchone() + return (r[0] if r and r[0] else None) + +def main(argv: List[str]) -> int: + ap = argparse.ArgumentParser(description="List WireGuard public keys from the local DB.") + ap.add_argument("-i","--iface", help="filter for one iface (e.g., x6)") + args = ap.parse_args(argv) + + try: + # Ensure DB exists + if not ic.DB_PATH.exists(): + print(f"❌ DB not found: {ic.DB_PATH}", file=sys.stderr) + return 1 + with ic.open_db() as conn: + print(list_client_keys(conn, args.iface, banner=True)) + print() + print(list_server_keys(conn, args.iface, banner=True)) + if args.iface: + cpub = client_pub_for_iface(conn, args.iface) + if cpub: + print() + print("# Copy to server peer config if needed:") + print(f'CLIENT_PUB="{cpub}"') + return 0 + except (sqlite3.Error, FileNotFoundError) as e: + print(f"❌ {e}", file=sys.stderr) + return 1 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/developer/tunnel-client/ls_server.py b/developer/tunnel-client/ls_server.py new file mode 100755 index 0000000..e1ee92d --- /dev/null +++ b/developer/tunnel-client/ls_server.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python3 +""" +ls_server.py — list server from the DB + +Default output: server names, one per line. 
+ +Options: + -i, --iface IFACE Filter to a single client interface (e.g., x6, US) + -l, --long Show a table with client, name, endpoint, allowed_ips, priority + -h, --help Show usage +""" + +from __future__ import annotations +import sys +import sqlite3 +import argparse +from typing import List, Tuple +import incommon as ic # DB_PATH, open_db() + +def parse_args(argv: List[str]) -> argparse.Namespace: + ap = argparse.ArgumentParser(add_help=False, prog="ls_server.py", description="List server from the DB") + ap.add_argument("-i","--iface", help="Filter by client interface") + ap.add_argument("-l","--long", action="store_true", help="Long table output") + ap.add_argument("-h","--help", action="help", help="Show this help and exit") + return ap.parse_args(argv) + +def fmt_table(headers: List[str], rows: List[Tuple]) -> str: + if not rows: return "" + cols = list(zip(*([headers] + [[("" if c is None else str(c)) for c in r] for r in rows]))) + widths = [max(len(x) for x in col) for col in cols] + line = lambda r: " ".join(f"{str(c):<{w}}" for c, w in zip(r, widths)) + out = [line(headers), line(tuple("-"*w for w in widths))] + for r in rows: out.append(line(r)) + return "\n".join(out) + +def list_names(conn: sqlite3.Connection, iface: str | None) -> int: + if iface: + rows = conn.execute(""" + SELECT s.name + FROM server s + JOIN Iface c ON c.id = s.iface_id + WHERE c.iface = ? + ORDER BY s.name + """, (iface,)).fetchall() + else: + rows = conn.execute("SELECT name FROM server ORDER BY name").fetchall() + for (name,) in rows: + print(name) + return 0 + +def list_long(conn: sqlite3.Connection, iface: str | None) -> int: + if iface: + rows = conn.execute(""" + SELECT c.iface, + s.name, + s.endpoint_host || ':' || CAST(s.endpoint_port AS TEXT) AS endpoint, + s.allowed_ips, + s.priority + FROM server s + JOIN Iface c ON c.id = s.iface_id + WHERE c.iface = ? 
+ ORDER BY c.iface, s.priority, s.name + """, (iface,)).fetchall() + else: + rows = conn.execute(""" + SELECT c.iface, + s.name, + s.endpoint_host || ':' || CAST(s.endpoint_port AS TEXT) AS endpoint, + s.allowed_ips, + s.priority + FROM server s + JOIN Iface c ON c.id = s.iface_id + ORDER BY c.iface, s.priority, s.name + """).fetchall() + + hdr = ["client","name","endpoint","allowed_ips","priority"] + txt = fmt_table(hdr, rows) + if txt: print(txt) + return 0 + +def main(argv: List[str]) -> int: + args = parse_args(argv) + try: + with ic.open_db() as conn: + return list_long(conn, args.iface) if args.long else list_names(conn, args.iface) + except (sqlite3.Error, FileNotFoundError) as e: + print(f"❌ {e}", file=sys.stderr) + return 2 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/developer/tunnel-client/ls_server_setting.py b/developer/tunnel-client/ls_server_setting.py new file mode 100755 index 0000000..594cd70 --- /dev/null +++ b/developer/tunnel-client/ls_server_setting.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python3 +""" +ls_server_settings.py — print server-side WireGuard [Peer] stanzas from the DB + +Purpose: + Emit configuration that belongs in a *server* wg conf (e.g., /etc/wireguard/wg0.conf). + One [Peer] block per (client, server) row. + +What is printed (per block): + - PublicKey = client's public key (from client.public_key) + - AllowedIPs = client's tunnel address(es) as seen by the server (from client.local_address_cidr) + (Use /32 per client. If multiple /32 per client are later added, enumerate them.) + - PresharedKey = server.preshared_key (only if present) + +Notes: + - Endpoint is NOT set on the server for client peers (client usually dials the server). + - PersistentKeepalive is generally set on the client; server may omit it. 
+ +Usage: + ./ls_server_settings.py # all client and their server entries + ./ls_server_settings.py x6 us # only for these client ifaces + ./ls_server_settings.py --server x6 # filter by server.name +""" + +from __future__ import annotations +import sys, sqlite3 +from typing import Iterable, List, Optional, Sequence, Tuple +from pathlib import Path + +# local helper import is optional; only used to locate db path if present +try: + import incommon as ic + DB_PATH = ic.DB_PATH +except Exception: + DB_PATH = Path(__file__).resolve().parent / "db" / "store" + +def die(msg: str, code: int = 1) -> None: + print(f"❌ {msg}", file=sys.stderr); sys.exit(code) + +def open_db(path: Path) -> sqlite3.Connection: + if not path.exists(): die(f"DB not found: {path}") + return sqlite3.connect(path.as_posix()) + +def parse_args(argv: Sequence[str]) -> Tuple[List[str], Optional[str]]: + ifaces: List[str] = [] + server_filter: Optional[str] = None + it = iter(argv) + for a in it: + if a == "--server": + try: server_filter = next(it) + except StopIteration: die("--server requires a value") + else: + ifaces.append(a) + return ifaces, server_filter + +def rows(conn: sqlite3.Connection, q: str, params: Iterable = ()) -> List[tuple]: + cur = conn.execute(q, tuple(params)) + out = cur.fetchall() + cur.close() + return out + +def collect(conn: sqlite3.Connection, ifaces: List[str], server_filter: Optional[str]) -> List[dict]: + where = [] + args: List = [] + if ifaces: + ph = ",".join("?" 
for _ in ifaces) + where.append(f"c.iface IN ({ph})") + args.extend(ifaces) + if server_filter: + where.append("s.name = ?") + args.append(server_filter) + w = ("WHERE " + " AND ".join(where)) if where else "" + q = f""" + SELECT c.id, c.iface, c.public_key, c.local_address_cidr, + s.name, s.preshared_key, s.endpoint_host, s.endpoint_port + FROM Iface c + LEFT JOIN server s ON s.iface_id = c.id + {w} + ORDER BY s.name, c.iface, s.priority ASC, s.id ASC; + """ + R = rows(conn, q, args) + out: List[dict] = [] + for cid, iface, cpub, cidr, sname, psk, host, port in R: + out.append({ + "iface_id": cid, + "iface": iface or "", + "client_pub": cpub or "", + "client_cidr": cidr or "", + "server_name": sname or "(unassigned)", + "server_host": host or "", + "server_port": port or None, + "psk": psk or None, + }) + return out + +def print_header() -> None: + print("# === Server-side WireGuard peer stanzas ===") + print("# Place each [Peer] block into the server's wg conf (e.g., /etc/wireguard/wg0.conf).") + print("# Endpoint is not set for client peers on the server.") + print("# AllowedIPs must be /32 per client address; enumerate multiple /32 if a client uses several.") + print() + +def print_blocks(items: List[dict]) -> None: + if not items: + print("# (no rows matched)"); return + print_header() + # group by server_name for readability + cur_group = None + for r in items: + grp = r["server_name"] + if grp != cur_group: + cur_group = grp + ep = f" ({r['server_host']}:{r['server_port']})" if r["server_host"] and r["server_port"] else "" + print(f"## Server: {grp}{ep}") + # stanza + print("[Peer]") + print(f"# client iface={r['iface']} tunnel={r['client_cidr']}") + print(f"PublicKey = {r['client_pub']}") + # AllowedIPs: prefer the exact CIDR stored for the client (typically /32) + print(f"AllowedIPs = {r['client_cidr']}") + if r["psk"]: + print(f"PresharedKey = {r['psk']}") + print() + # end + +def main(argv: Sequence[str]) -> int: + ifaces, server_filter = 
parse_args(argv) + try: + with open_db(DB_PATH) as conn: + items = collect(conn, ifaces, server_filter) + except sqlite3.Error as e: + die(f"sqlite error: {e}") + print_blocks(items) + return 0 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/developer/tunnel-client/ls_servers.sh b/developer/tunnel-client/ls_servers.sh new file mode 100755 index 0000000..5d4f4ef --- /dev/null +++ b/developer/tunnel-client/ls_servers.sh @@ -0,0 +1,7 @@ + +# ls_server.sh +#!/usr/bin/env bash +set -euo pipefail +DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd)" +DB="$DIR/db/store" +sqlite3 -noheader -batch "$DB" "SELECT name FROM server ORDER BY name;" diff --git a/developer/tunnel-client/ls_user.py b/developer/tunnel-client/ls_user.py new file mode 100755 index 0000000..90c0ef2 --- /dev/null +++ b/developer/tunnel-client/ls_user.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +""" +ls_users.py — print " " from DB (names only) + +- Validates required tables exist (client, User) +- No side effects; read-only +""" + +from __future__ import annotations +import sys +import sqlite3 +import incommon as ic # DB_PATH, open_db() + +HELP = """Usage: ls_users.py +Prints one line per user binding as: " ". 
+""" + +def tables_ok(conn: sqlite3.Connection) -> bool: + row = conn.execute( + """ + SELECT + (SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='client'), + (SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='User') + """ + ).fetchone() + return row == (1, 1) + +def list_users(conn: sqlite3.Connection) -> None: + cur = conn.execute( + """ + SELECT ub.username, c.iface + FROM User ub + JOIN Iface c ON c.id = ub.iface_id + ORDER BY c.iface, ub.username + """ + ) + for username, iface in cur.fetchall(): + print(f"{username} {iface}") + +def main(argv: list[str]) -> int: + if argv and argv[0] in ("-h", "--help"): + print(HELP.strip()); return 0 + try: + with ic.open_db() as conn: + if not tables_ok(conn): + print("❌ Missing tables (client/User). Initialize the database first.", file=sys.stderr) + return 1 + list_users(conn) + return 0 + except (sqlite3.Error, FileNotFoundError) as e: + print(f"❌ {e}", file=sys.stderr) + return 2 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/developer/tunnel-client/manual_reference.org b/developer/tunnel-client/manual_reference.org new file mode 100644 index 0000000..6b0b894 --- /dev/null +++ b/developer/tunnel-client/manual_reference.org @@ -0,0 +1,90 @@ +#+title: WireGuard Client — Reference +#+author: Thomas / Aerenis +#+startup: showall + +* Directory layout (wg/) +- =schema.sql= :: SQLite schema for clients/servers/routes/meta (keys stored in DB). +- =wg_client.db= :: SQLite DB (created by =db_init.sh=). +- =db_init.sh= :: Creates/initializes DB from =schema.sql= (user-space). +- =client_create_keys.sh= :: Creates a fresh client keypair for an =iface= and stores into DB. +- =config_client_StanleyPark.sh= :: Upserts the StanleyPark client row (iface, addr, mtu, dns_mode, autostart, etc.). +- =config_server_x6.sh= :: Upserts the remote server (“x6”) row linked to the client. +- =bind_user.sh= :: Binds a Linux username (and resolves UID) to a client interface in DB. 
+- =ls_clients.sh= :: Lists interface names only (one per line). +- =ls_servers.sh= :: Lists server names (optionally grouped by client). +- =ls_users.sh= :: Lists = = pairs. +- =inspect.sh= :: Shows effective config from DB and current system state for a given iface. +- =IP_rule_add_UID.sh= :: Helper installed to =/usr/local/bin= (adds =ip rule uidrange= entries idempotently). +- =stage_generate.sh= :: Builds staged artifacts from DB: + - =stage/wireguard/.conf= + - =stage/systemd/wg-quick@.d/restart.conf= + - =stage/usr/local/bin/routes_init_.sh= + - copies =IP_rule_add_UID.sh= into stage for install + - Offers to clean stage first; supports =--clean=, =--no-clean=, =--dry-clean= +- =stage_install.sh= :: Copies staged files into: + - =/etc/wireguard/.conf= + - =/etc/systemd/system/wg-quick@.d/restart.conf= + - =/usr/local/bin/routes_init_.sh= + - =/usr/local/bin/IP_rule_add_UID.sh= + - Reloads systemd daemon and prints next steps. +- =stage_clean.sh= :: Empties =./stage= safely (with confirmation). +- =routes_init_x6.sh= :: (Legacy) Example per-iface route script; superseded by staged =routes_init_.sh= +- =deprecated/= :: Old scripts retained for reference. +- =stage/= :: Generated artifacts awaiting installation. +- =scratchpad/= :: (Optional) Temporary workspace for ad-hoc edits before installation. 
+ +* Schema (summary) +- =clients= + - =iface= (TEXT UNIQUE): bare interface name (e.g., ‘x6’) + - =rt_table_id= (INTEGER): e.g., 1002 + - =rt_table_name= (TEXT): defaults to iface if NULL (used by route scripts and =ip rule=) + - =bound_user= (TEXT), =bound_uid= (INTEGER): Linux user + UID that should egress via this iface + - =local_address_cidr=, =private_key=, =public_key=, =mtu=, =fwmark= + - =dns_mode= (‘none’ or ‘static’), =dns_servers= (if static) + - =autostart= (0/1) +- =servers= + - Linked by =client_id= → =clients.id= + - =name= (‘x6’), =public_key=, optional =preshared_key= + - =endpoint_host=, =endpoint_port=, =allowed_ips=, =keepalive_s= + - =route_allowed_ips= (0/1): when 0, =Table= is set to =off= in wg conf and routing is handled by our scripts + - =priority= (lower preferred) — first by priority then id is staged +- =routes= + - Linked by =client_id= + - =cidr=, optional =via=, optional =table_name= (else use client rt name), optional =metric= + - =on_up= (1/0), =on_down= (1/0) — generator emits only =on_up= routes in =routes_init_.sh= +- =meta= + - =schema= key describing current schema version/string + +* Generated files (stage/) +- wireguard/.conf :: + - =[Interface]= :: Address, PrivateKey, optional MTU/FwMark/DNS, optional =Table= off + - =[Peer]= :: Server public key, optional PSK, Endpoint, AllowedIPs, optional PersistentKeepalive +- systemd/wg-quick@.d/restart.conf :: + - Restart policy; force fresh link; =ExecStartPost= hooks: + - routes init script + - =IP_rule_add_UID.sh = (if bound) + - logger line +- usr/local/bin/routes_init_.sh :: + - Installs default route to device in =rt_table_name= and a blackhole default guard + - Adds any DB =routes= with =on_up=1 + +* Operational Notes +- =iface= names are bare (not prefixed with =wg_=). Systemd unit is =wg-quick@.service=. +- Unbound rides the tunnel; leave WireGuard DNS unset (=dns_mode=none=) unless you want static DNS in the conf. 
+- Copy-based install preserves an audit trail in =./stage=. Clean explicitly when desired. + +* Security +- The DB contains *private keys*. Restrict permissions: + #+begin_src bash + chmod 600 wg_client.db + #+end_src +- Back up =wg_client.db= securely. + +* Troubleshooting +- If unit fails to start: =journalctl -u wg-quick@ -b= +- Handshake age / peer state: =wg show= +- Routing: =ip rule=, =ip route show table = +- Regenerate & reinstall on mismatch: + #+begin_src bash + ./stage_generate.sh --clean && sudo ./stage_install.sh && sudo systemctl restart wg-quick@ + #+end_src diff --git a/developer/tunnel-client/manual_user.org b/developer/tunnel-client/manual_user.org new file mode 100644 index 0000000..bef4b37 --- /dev/null +++ b/developer/tunnel-client/manual_user.org @@ -0,0 +1,104 @@ +#+title: WireGuard Client — Admin User Guide +#+author: Thomas / Aerenis +#+startup: showall + +* Overview +Authoritative state lives here: +- ~/executable/setup/Debian12_client/wg/ +- Keys + config live in *SQLite* (./db/store). +- You *stage* generated files in ./stage/, then *install* as root. +- Interface names are *bare* (e.g., =x6=, =US=). Unit: =wg-quick@=; config: =/etc/wireguard/.conf=. +- Unbound is used for DNS; typically =dns_mode= is =none= (no =DNS= line in WG conf). +- Staging dirs are not auto-cleaned; each of =db/=, =stage/=, =scratchpad/= contains a =.gitignore= that ignores everything except itself. 
+ +* Typical Workflow (example: x6) +1) Initialize DB +#+begin_src bash +./db_init.sh +#+end_src + +2) Create/Update *client* record for this host (inserts the =x6= row) +#+begin_src bash +./config_client_StanleyPark.sh +#+end_src + +3) Create/rotate *client keys* (writes keys into DB for =x6=) +#+begin_src bash +./client_create_keys.sh x6 +#+end_src + +4) Configure the *remote server* record (x6) +#+begin_src bash +./config_server_x6.sh +#+end_src + +5) Bind Linux user(s) to interface (traffic steering via uid rules) +#+begin_src bash +./user_to_iface.sh Thomas-x6 x6 +# or bulk: +./user_all_to_iface.sh +#+end_src +Verify: +#+begin_src bash +./ls_users.sh +#+end_src + +6) Generate staged files (will offer to clean ./stage first) +#+begin_src bash +./stage_generate.sh +#+end_src +Review contents of =./stage= (WG conf, systemd drop-in, route script). + +7) Install (as root) — copies staged files into the system +#+begin_src bash +sudo ./stage_install.sh +#+end_src + +8) Enable & start the interface +#+begin_src bash +sudo systemctl enable wg-quick@x6 +sudo systemctl start wg-quick@x6 +#+end_src + +9) Inspect / validate +#+begin_src bash +./inspect.sh x6 +ip rule | grep x6 +ip route show table x6 +wg show +#+end_src + +* Key Rotation (client) +- Update keys in DB and redeploy: +#+begin_src bash +./client_create_keys.sh x6 +./stage_generate.sh --clean +sudo ./stage_install.sh +sudo systemctl restart wg-quick@x6 +#+end_src +- Then update the *server’s* peer public key accordingly. + +* Listing helpers +- Interfaces: +#+begin_src bash +./ls_clients.sh # prints: x6, US, ... +#+end_src +- Servers (per client): +#+begin_src bash +./ls_servers.sh # prints server names per client +#+end_src +- User bindings: +#+begin_src bash +./ls_users.sh # prints: +#+end_src + +* Notes +- =./stage= is not auto-cleaned. 
Use: +#+begin_src bash +./stage_clean.sh +#+end_src +- Protect your DB (contains private keys): +#+begin_src bash +chmod 700 db +chmod 600 db/store +#+end_src diff --git a/developer/tunnel-client/scratchpad/.gitignore b/developer/tunnel-client/scratchpad/.gitignore new file mode 100644 index 0000000..53642ce --- /dev/null +++ b/developer/tunnel-client/scratchpad/.gitignore @@ -0,0 +1,4 @@ + +* +!.gitignore + diff --git a/developer/tunnel-client/stage/.gitignore b/developer/tunnel-client/stage/.gitignore new file mode 100644 index 0000000..53642ce --- /dev/null +++ b/developer/tunnel-client/stage/.gitignore @@ -0,0 +1,4 @@ + +* +!.gitignore + diff --git a/developer/tunnel-client/stage_IP_apply_script.py b/developer/tunnel-client/stage_IP_apply_script.py new file mode 100755 index 0000000..82e2baa --- /dev/null +++ b/developer/tunnel-client/stage_IP_apply_script.py @@ -0,0 +1,508 @@ +#!/usr/bin/env python3 +""" +stage_IP_apply_script.py + +Given: + - A SQLite DB (schema you’ve defined), with: + * Iface(id, iface, local_address_cidr, rt_table_name, rt_table_id) + * v_iface_effective(id, rt_table_name_eff, local_address_cidr) + * Route(iface_id, cidr, via, table_name, metric, on_up, on_down) + * "User"(iface_id, username, uid) — table formerly User_Binding + * Meta(key='subu_cidr', value) + - A list of interface names to include (e.g., ["x6","US"]). + +Does: + - Reads DB once and *synthesizes a single* idempotent runtime script + that, for the selected interfaces, on each `wg-quick@IFACE` start: + 1) resets IPv4 addresses on the iface (delete-if-present, then add) + 2) ensures all configured routes exist (using `ip -4 route replace`) + 3) resets policy rules by preference number (delete-by-pref, then add) + with **per-iface prefs** to avoid collisions. + - Stages that script under: stage/usr/local/bin/ + - Stages per-iface systemd drop-ins: + stage/etc/systemd/system/wg-quick@IFACE.service.d/-postup-IP-state.conf + which call the script (default prio = 20). 
+ - Stages a merged copy of rt_tables (does not write the live /etc/iproute2/rt_tables). + +Returns: + (script_path, notes[list of strings]) + +Errors: + - Raises RuntimeError if no interfaces provided or there’s nothing to emit. + - Does not modify kernel state — this is staging only. + +Notes: + - Addresses: reset pattern (del → add) for deterministic convergence. + - Routes: `ip -4 route replace` (best-practice) with tolerant logging. + - Rules: reset by `pref` (del-by-pref → add). Prefs are unique per iface: + base = 17000 + Iface.id * 10 + from_pref = base + 0 + uid_pref = base + 1 + - The runtime script accepts optional IFACE args to limit application. +""" + +from __future__ import annotations +from pathlib import Path +from typing import Dict, Iterable, List, Optional, Sequence, Tuple +import argparse +import sqlite3 +import sys + +import incommon as ic # expected: open_db() + +ROOT = Path(__file__).resolve().parent +STAGE_ROOT = ROOT / "stage" + +RT_TABLES_PATH = Path("/etc/iproute2/rt_tables") + + +# ---------- helpers for notes ---------- + +def _stage_note(path: Path, stage_root: Path) -> str: + """Return a short path like 'stage:/usr/local/bin/apply_IP_state.sh'.""" + try: + rel = path.relative_to(stage_root) + return f"stage:/{rel.as_posix()}" + except ValueError: + return str(path) + + +# ---------- rt_tables helpers ---------- + +def _parse_rt_tables(path: Path) -> Tuple[List[str], Dict[str, int], set[int]]: + """ + Returns (lines, name_to_num, used_nums). + Keeps original lines for a non-destructive merge. 
+ """ + text = path.read_text() if path.exists() else "" + lines = text.splitlines() + name_to_num: Dict[str, int] = {} + used_nums: set[int] = set() + for ln in lines: + s = ln.strip() + if not s or s.startswith("#"): + continue + parts = s.split() + if len(parts) >= 2 and parts[0].isdigit(): + n = int(parts[0]); nm = parts[1] + if nm not in name_to_num and n not in used_nums: + name_to_num[nm] = n + used_nums.add(n) + return (lines, name_to_num, used_nums) + + +def _first_free_id(used_nums: Iterable[int], low: int, high: int) -> int: + used = set(used_nums) + for n in range(low, high + 1): + if n not in used: + return n + raise RuntimeError(f"no free routing-table IDs in [{low},{high}]") + + +def _stage_rt_tables( + stage_root: Path, + meta: Dict[str, Tuple[int, Optional[int], str, Optional[str]]], + low: int = 20000, + high: int = 29999 +) -> Tuple[Path, List[str]]: + """ + Ensure entries for all effective table names present in `meta`. + Prefer DB rt_table_id when available and not conflicting. + Write merged file to stage/etc/iproute2/rt_tables. 
+ Returns (staged_path, notes) + """ + lines, name_to_num, used_nums = _parse_rt_tables(RT_TABLES_PATH) + + # Build eff_name -> preferred_num mapping (first non-None rt_id wins) + eff_to_preferred: Dict[str, Optional[int]] = {} + for _n, (_iid, rtid, eff, _cidr) in meta.items(): + if eff not in eff_to_preferred: + eff_to_preferred[eff] = rtid if rtid is not None else None + + additions: List[Tuple[int, str]] = [] + for eff_name, preferred_num in eff_to_preferred.items(): + if eff_name in name_to_num: + continue # already present + if preferred_num is not None and preferred_num not in used_nums: + num = preferred_num + else: + num = _first_free_id(used_nums, low, high) + name_to_num[eff_name] = num + used_nums.add(num) + additions.append((num, eff_name)) + + out = stage_root / "etc" / "iproute2" / "rt_tables" + out.parent.mkdir(parents=True, exist_ok=True) + + if not additions: + # still write a copy of current file so install step is uniform + out.write_text("\n".join(lines) + ("\n" if lines else "")) + return (out, ["rt_tables: no additions (kept existing map)"]) + + new_lines = list(lines) + for num, name in sorted(additions): + new_lines.append(f"{num} {name}") + + out.write_text("\n".join(new_lines) + "\n") + notes = [f"rt_tables: add {num} {name}" for num, name in sorted(additions)] + return (out, notes) + + +# ---------- DB access ---------- + +def _fetch_meta_subu_cidr(conn: sqlite3.Connection, default="10.0.0.0/24") -> str: + row = conn.execute("SELECT value FROM Meta WHERE key='subu_cidr' LIMIT 1;").fetchone() + return str(row[0]) if row and row[0] else default + + +def _fetch_iface_meta(conn: sqlite3.Connection, iface_names: Sequence[str]) -> Dict[str, Tuple[int, Optional[int], str, Optional[str]]]: + """ + Return {iface_name -> (iface_id, rt_table_id, rt_table_name_eff, local_address_cidr_or_None)}. + """ + if not iface_names: + return {} + ph = ",".join("?" 
for _ in iface_names) + sql = f""" + SELECT i.id, + i.iface, + i.rt_table_id, + v.rt_table_name_eff, + NULLIF(TRIM(v.local_address_cidr),'') AS cidr + FROM Iface i + JOIN v_iface_effective v ON v.id = i.id + WHERE i.iface IN ({ph}) + ORDER BY i.id; + """ + rows = conn.execute(sql, tuple(iface_names)).fetchall() + out: Dict[str, Tuple[int, Optional[int], str, Optional[str]]] = {} + for r in rows: + iface_id = int(r[0]); name = str(r[1]) + rt_id = (int(r[2]) if r[2] is not None else None) + eff = str(r[3]) + cidr = (str(r[4]) if r[4] is not None else None) + out[name] = (iface_id, rt_id, eff, cidr) + return out + + +def _fetch_routes_by_iface_id( + conn: sqlite3.Connection, + iface_ids: Sequence[int], + only_on_up: bool = True +) -> Dict[int, List[Tuple[str, Optional[str], Optional[str], Optional[int]]]]: + """ + Return {iface_id -> [(cidr, via, table_name_or_None, metric_or_None), ...]}. + """ + if not iface_ids: + return {} + ph = ",".join("?" for _ in iface_ids) + sql = f""" + SELECT iface_id, + cidr, + NULLIF(TRIM(via),'') AS via, + NULLIF(TRIM(table_name),'') AS table_name, + metric, + on_up + FROM Route + WHERE iface_id IN ({ph}) + ORDER BY id; + """ + rows = conn.execute(sql, tuple(iface_ids)).fetchall() + out: Dict[int, List[Tuple[str, Optional[str], Optional[str], Optional[int]]]] = {} + for iface_id, cidr, via, tname, metric, on_up in rows: + if only_on_up and int(on_up) != 1: + continue + out.setdefault(int(iface_id), []).append( + (str(cidr), + (str(via) if via is not None else None), + (str(tname) if tname is not None else None), + (int(metric) if metric is not None else None)) + ) + return out + + +def _fetch_uids_by_iface_id(conn: sqlite3.Connection, iface_ids: Sequence[int]) -> Dict[int, List[int]]: + """ + Return {iface_id -> [uid, ...]} using table "User". + """ + if not iface_ids: + return {} + ph = ",".join("?" 
for _ in iface_ids) + sql = f""" + SELECT iface_id, + uid + FROM "User" + WHERE iface_id IN ({ph}) + AND uid IS NOT NULL + AND CAST(uid AS TEXT) != '' + ORDER BY iface_id, uid; + """ + rows = conn.execute(sql, tuple(iface_ids)).fetchall() + out: Dict[int, List[int]] = {} + for iface_id, uid in rows: + out.setdefault(int(iface_id), []).append(int(uid)) + return out + + +# ---------- rendering ---------- + +def _render_composite_script( + plan_ifaces: List[str], + meta: Dict[str, Tuple[int, Optional[int], str, Optional[str]]], + routes_by_id: Dict[int, List[Tuple[str, Optional[str], Optional[str], Optional[int]]]], + uids_by_id: Dict[int, List[int]], + subu_cidr: str +) -> str: + """ + Build a single bash script that ensures addresses → routes → rules. + """ + lines: List[str] = [ + "#!/usr/bin/env bash", + "# apply IP state for selected interfaces (addresses, routes, rules) — idempotent", + "set -euo pipefail", + "", + "ALL_ARGS=(\"$@\")", + "", + "want_iface(){", + " local t=$1", + " if [ ${#ALL_ARGS[@]} -eq 0 ]; then return 0; fi", + " for a in \"${ALL_ARGS[@]}\"; do [ \"$a\" = \"$t\" ] && return 0; done", + " return 1", + "}", + "", + "exists_iface(){ ip -o link show dev \"$1\" >/dev/null 2>&1; }", + "", + "# Reset address: delete the exact CIDR if present, then add it back.", + "reset_addr(){", + " local iface=$1; local cidr=$2", + " ip -4 addr del \"$cidr\" dev \"$iface\" >/dev/null 2>&1 || true", + " if ip -4 addr add \"$cidr\" dev \"$iface\"; then", + " logger \"addr set: $iface $cidr\"", + " else", + " logger \"addr add failed (non-fatal): $iface $cidr\"", + " fi", + "}", + "", + "# Ensure route using replace; log but do not fail the unit if kernel says 'exists'.", + "ensure_route(){", + " local table=$1; local cidr=$2; local dev=$3; local via=${4:-}; local metric=${5:-}", + " if [ -n \"$via\" ] && [ -n \"$metric\" ]; then", + " if ip -4 route replace \"$cidr\" via \"$via\" dev \"$dev\" table \"$table\" metric \"$metric\" 2>/dev/null; then", + " logger 
\"route ensure: table=$table cidr=$cidr dev=$dev via=$via metric=$metric\"", + " else", + " logger \"route ensure (tolerated failure): table=$table cidr=$cidr dev=$dev via=$via metric=$metric\"", + " fi", + " elif [ -n \"$via\" ]; then", + " if ip -4 route replace \"$cidr\" via \"$via\" dev \"$dev\" table \"$table\" 2>/dev/null; then", + " logger \"route ensure: table=$table cidr=$cidr dev=$dev via=$via\"", + " else", + " logger \"route ensure (tolerated failure): table=$table cidr=$cidr dev=$dev via=$via\"", + " fi", + " elif [ -n \"$metric\" ]; then", + " if ip -4 route replace \"$cidr\" dev \"$dev\" table \"$table\" metric \"$metric\" 2>/dev/null; then", + " logger \"route ensure: table=$table cidr=$cidr dev=$dev metric=$metric\"", + " else", + " logger \"route ensure (tolerated failure): table=$table cidr=$cidr dev=$dev metric=$metric\"", + " fi", + " else", + " if ip -4 route replace \"$cidr\" dev \"$dev\" table \"$table\" 2>/dev/null; then", + " logger \"route ensure: table=$table cidr=$cidr dev=$dev\"", + " else", + " logger \"route ensure (tolerated failure): table=$table cidr=$cidr dev=$dev\"", + " fi", + " fi", + "}", + "", + "# Reset a policy rule by numeric preference: delete-by-pref, then add.", + "reset_IP_rule(){", + " # Usage: reset_IP_rule ", + " local pref=$1; shift", + " ip -4 rule del pref \"$pref\" >/dev/null 2>&1 || true", + " if ip -4 rule add \"$@\" pref \"$pref\"; then", + " logger \"rule set: pref=$pref $*\"", + " else", + " logger \"rule add failed (non-fatal): pref=$pref $*\"", + " fi", + "}", + "", + ] + + any_action = False + + # 1) Addresses (reset) + for name in plan_ifaces: + _iid, _rtid, rtname, cidr = meta[name] + if cidr: + lines += [ + f'if want_iface {name}; then', + f' if exists_iface {name}; then reset_addr {name} {cidr}; else logger "skip: iface missing: {name}"; fi', + 'fi' + ] + any_action = True + + # 2) Routes + for name in plan_ifaces: + iid, _rtid, rtname, _cidr = meta[name] + rows = routes_by_id.get(iid, []) + for 
cidr, via, t_override, metric in rows: + table_eff = t_override or rtname + viastr = (via if via is not None else "") + mstr = (str(metric) if metric is not None else "") + lines += [ + f'if want_iface {name}; then', + f' if exists_iface {name}; then ensure_route "{table_eff}" "{cidr}" "{name}" "{viastr}" "{mstr}"; else logger "skip: iface missing: {name}"; fi', + 'fi' + ] + any_action = True + + # 3) Rules (reset by pref: src-cidr, uids, and one global prohibit) + for name in plan_ifaces: + iid, _rtid, rtname, cidr = meta[name] + + # Per-iface preference block (no collisions) + base_pref = 17000 + iid * 10 + from_pref = base_pref + 0 + uid_pref = base_pref + 1 + + if cidr: + lines += [ + f'if want_iface {name}; then', + f' reset_IP_rule {from_pref} from "{cidr}" lookup "{rtname}"', + 'fi' + ] + any_action = True + + uids = uids_by_id.get(iid, []) + for u in uids: + lines += [ + f'if want_iface {name}; then', + f' reset_IP_rule {uid_pref} uidrange "{u}-{u}" lookup "{rtname}"', + 'fi' + ] + any_action = True + + if subu_cidr: + lines += [ + f'reset_IP_rule 18050 from "{subu_cidr}" prohibit' + ] + any_action = True + + if not any_action: + raise RuntimeError("no IP state to emit for requested interfaces") + + lines += [""] + return "\n".join(lines) + + +def _write_dropin_for_iface(stage_root: Path ,iface: str ,script_name: str ,priority: int) -> Path: + # correct systemd path: /etc/systemd/system/wg-quick@IFACE.service.d/ + d = stage_root / "etc" / "systemd" / "system" / f"wg-quick@{iface}.service.d" + d.mkdir(parents=True ,exist_ok=True) + p = d / f"{priority}-postup-IP-state.conf" + content = ( + "[Service]\n" + f"ExecStartPost=+/usr/local/bin/{script_name} {iface}\n" + ) + p.write_text(content) + return p + + +# ---------- business ---------- + +def stage_IP_apply_script( + conn: sqlite3.Connection, + iface_names: Sequence[str], + stage_root: Optional[Path] = None, + script_name: str = "apply_IP_state.sh", + dropin_priority: int = 20, + only_on_up: bool = True, + 
with_dropins: bool = True, + dry_run: bool = False +) -> Tuple[Path, List[str]]: + """ + Plan and stage the unified runtime script, a merged rt_tables, and per-iface drop-ins. + """ + if not iface_names: + raise RuntimeError("no interfaces provided") + + meta = _fetch_iface_meta(conn, iface_names) + if not meta: + raise RuntimeError("none of the requested interfaces exist in DB") + + # preserve caller order but skip unknowns (already handled above) + ifaces_in_order = [n for n in iface_names if n in meta] + iface_ids = [meta[n][0] for n in ifaces_in_order] + + routes_by_id = _fetch_routes_by_iface_id(conn, iface_ids, only_on_up=only_on_up) + uids_by_id = _fetch_uids_by_iface_id(conn, iface_ids) + subu_cidr = _fetch_meta_subu_cidr(conn, default="10.0.0.0/24") + + sr = stage_root or STAGE_ROOT + out = sr / "usr" / "local" / "bin" / script_name + out.parent.mkdir(parents=True, exist_ok=True) + + content = _render_composite_script(ifaces_in_order, meta, routes_by_id, uids_by_id, subu_cidr) + + notes: List[str] = [] + if dry_run: + notes.append(f"dry-run: would write {_stage_note(out, sr)}") + if with_dropins: + for n in ifaces_in_order: + notes.append(f"dry-run: would write {_stage_note(sr / 'etc' / 'systemd' / 'system' / f'wg-quick@{n}.service.d' / f'{dropin_priority}-postup-IP-state.conf', sr)}") + rt_out = sr / "etc" / "iproute2" / "rt_tables" + notes.append(f"dry-run: would write {_stage_note(rt_out, sr)}") + return (out, notes) + + # ensure rt_tables entries for the effective names used by these ifaces + rt_path, rt_notes = _stage_rt_tables(sr, meta) + notes.extend(rt_notes) + notes.append(f"staged: {_stage_note(rt_path, sr)}") + + out.write_text(content) + out.chmod(0o500) + notes.append(f"staged: {_stage_note(out, sr)}") + + if with_dropins: + for n in ifaces_in_order: + dp = _write_dropin_for_iface(sr, n, script_name, dropin_priority) + notes.append(f"staged: {_stage_note(dp, sr)}") + + return (out, notes) + +# Backwards-compatible alias for callers that still 
import the old name. +stage_ip_apply_script = stage_IP_apply_script + + +# ---------- CLI ---------- + +def main(argv=None) -> int: + ap = argparse.ArgumentParser(description="Stage one script that applies IP addresses, routes, and rules for selected ifaces.") + ap.add_argument("ifaces", nargs="+", help="interface names to include") + ap.add_argument("--script-name", default="apply_IP_state.sh") + ap.add_argument("--dropin-priority", type=int, default=20) + ap.add_argument("--all", action="store_true", help="include routes where on_up=0 as well") + ap.add_argument("--no-dropins", action="store_true", help="do not stage systemd drop-ins") + ap.add_argument("--dry-run", action="store_true") + args = ap.parse_args(argv) + + with ic.open_db() as conn: + try: + out, notes = stage_IP_apply_script( + conn, + args.ifaces, + script_name=args.script_name, + dropin_priority=args.dropin_priority, + only_on_up=(not args.all), + with_dropins=(not args.no_dropins), + dry_run=args.dry_run + ) + except Exception as e: + print(f"error: {e}", file=sys.stderr) + return 2 + + if notes: + print("\n".join(notes)) + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/developer/tunnel-client/stage_StanleyPark.py b/developer/tunnel-client/stage_StanleyPark.py new file mode 100755 index 0000000..77264a3 --- /dev/null +++ b/developer/tunnel-client/stage_StanleyPark.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 +""" +stage_StanleyPark.py + +Minimal config wrapper for client 'StanleyPark'. +Calls the generic stage orchestrator with the chosen ifaces. 
+""" + +from __future__ import annotations +from stage_client import stage_client_artifacts + +CLIENT = "StanleyPark" +IFACES = ["x6","US"] # keep this list minimal & declarative + +if __name__ == "__main__": + ok = stage_client_artifacts( + CLIENT + ,IFACES + ) + raise SystemExit(0 if ok else 2) diff --git a/developer/tunnel-client/stage_client.py b/developer/tunnel-client/stage_client.py new file mode 100755 index 0000000..918e6bb --- /dev/null +++ b/developer/tunnel-client/stage_client.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +""" +stage_client.py + +Given: + - A SQLite DB via incommon.open_db() + - A client machine name (for WG PrivateKey lookup under ./key/) + - One or more interface names (e.g., x6, US) + +Does: + 1) Stage WireGuard confs for each iface + 2) Stage a unified IP apply script (addresses, routes, rules) + per-iface drop-ins + +Returns: + True on success, False on failure (prints progress) +""" + +from __future__ import annotations +from pathlib import Path +from typing import Callable ,Optional ,Sequence ,Tuple +import argparse +import subprocess +import sys + +import incommon as ic # open_db() + +ROOT = Path(__file__).resolve().parent +STAGE_ROOT = ROOT / "stage" + + +def _msg_wrapped_call(label: str ,fn: Callable[[], Tuple[Path ,Sequence[str]]]) -> bool: + print(f"→ {label}") + try: + path ,notes = fn() + for n in notes: + print(n) + if path: + print(f"✔ {label}: staged: {path}") + else: + print(f"✔ {label}") + return True + except Exception as e: + print(f"❌ {label}: {e}") + return False + + +def _call_cli(argv: Sequence[str]) -> Tuple[Path ,Sequence[str]]: + cp = subprocess.run(list(argv) ,text=True ,capture_output=True) + if cp.returncode != 0: + raise RuntimeError(cp.stderr.strip() or f"exit {cp.returncode}") + notes = [] + staged_path: Optional[Path] = None + for line in (cp.stdout or "").splitlines(): + notes.append(line) + if line.startswith("staged: "): + try: + staged_path = Path(line.split("staged:",1)[1].strip()) + except 
Exception: + pass + return (staged_path or STAGE_ROOT ,notes) + + +def _stage_wg_conf_step(client_name: str ,ifaces: Sequence[str]) -> bool: + def _do(): + try: + from stage_wg_conf import stage_wg_conf # type: ignore + with ic.open_db() as conn: + path ,notes = stage_wg_conf( + conn + ,ifaces + ,client_name + ,stage_root=STAGE_ROOT + ,dry_run=False + ) + return (path ,notes) + except Exception: + return _call_cli([str(ROOT / "stage_wg_conf.py") ,client_name ,*ifaces]) + return _msg_wrapped_call(f"stage_wg_conf ({client_name}; {','.join(ifaces)})" ,_do) + + +def _stage_apply_ip_state_step(ifaces: Sequence[str]) -> bool: + def _do(): + try: + from stage_IP_apply_script import stage_ip_apply_script # type: ignore + with ic.open_db() as conn: + path ,notes = stage_ip_apply_script( + conn + ,ifaces + ,stage_root=STAGE_ROOT + ,script_name="apply_ip_state.sh" + ,only_on_up=True + ,dry_run=False + ) + return (path ,notes) + except Exception: + return _call_cli([str(ROOT / "stage_IP_apply_script.py") ,*ifaces]) + return _msg_wrapped_call(f"stage_IP_apply_script ({','.join(ifaces)})" ,_do) + + +def stage_client_artifacts( + client_name: str + ,iface_names: Sequence[str] + ,stage_root: Optional[Path] = None +) -> bool: + if not iface_names: + raise ValueError("no interfaces provided") + if stage_root: + global STAGE_ROOT + STAGE_ROOT = stage_root + + STAGE_ROOT.mkdir(parents=True ,exist_ok=True) + + ok = True + ok = _stage_wg_conf_step(client_name ,iface_names) and ok + ok = _stage_apply_ip_state_step(iface_names) and ok + return ok + + +def main(argv: Optional[Sequence[str]] = None) -> int: + ap = argparse.ArgumentParser(description="Stage all artifacts for a client.") + ap.add_argument("--client" ,required=True ,help="client machine name (for key lookup)") + ap.add_argument("ifaces" ,nargs="+") + args = ap.parse_args(argv) + + ok = stage_client_artifacts( + args.client + ,args.ifaces + ) + return 0 if ok else 2 + + +if __name__ == "__main__": + sys.exit(main()) diff --git 
a/developer/tunnel-client/stage_wg_conf.py b/developer/tunnel-client/stage_wg_conf.py new file mode 100755 index 0000000..28dd4d3 --- /dev/null +++ b/developer/tunnel-client/stage_wg_conf.py @@ -0,0 +1,254 @@ +#!/usr/bin/env python3 +""" +stage_wg_conf.py + +Given: + - SQLite DB reachable via incommon.open_db() + - A list of interface names (e.g., x6 ,US) + - client_machine_name used to locate the private key file under ./key/ + +Does: + - For each iface, stage a minimal WireGuard config to stage/etc/wireguard/.conf: + [Interface] + PrivateKey = > + Table = off + ListenPort = (if the column exists and value is not NULL) + # ListenPort = 51820 (commented if value is absent) + [Peer] (one per Server row for that iface) + PublicKey = + PresharedKey = (only if present) + AllowedIPs = + Endpoint = : + PersistentKeepalive = (only if present) + - Omits Address ,PostUp ,SaveConfig (your systemd drop-in + script handle L3 state) + +Returns: + - (list_of_staged_paths ,notes) + +Errors: + - Missing private key file + - Iface not found + - Server rows missing required fields for that iface +""" + +from __future__ import annotations +from pathlib import Path +from typing import Dict ,Iterable ,List ,Optional ,Sequence ,Tuple +import argparse +import sqlite3 +import sys + +import incommon as ic # expected: open_db() + +ROOT = Path(__file__).resolve().parent +STAGE_ROOT = ROOT / "stage" + + +# ---------- helpers ---------- + +def _has_column(conn: sqlite3.Connection ,table: str ,col: str) -> bool: + cur = conn.execute(f"PRAGMA table_info({table});") + cols = [str(r[1]) for r in cur.fetchall()] + return col in cols + + +def _read_private_key(client_machine_name: str ,key_root: Optional[Path] = None) -> str: + kr = key_root or (ROOT / "key") + path = kr / client_machine_name + if not path.exists(): + raise RuntimeError(f"private key file missing: {path}") + text = path.read_text().strip() + if not text: + raise RuntimeError(f"private key file empty: {path}") + # WireGuard private 
keys are base64 (typically 44 chars), but don't over-validate here. + return text + + +# ---------- DB ---------- + +def _fetch_iface_ids_and_ports( + conn: sqlite3.Connection + ,iface_names: Sequence[str] +) -> Dict[str ,Tuple[int ,Optional[int]]]: + """ + Return {iface_name -> (iface_id ,listen_port_or_None)} for requested names. + If the listen_port column does not exist, value is None. + """ + if not iface_names: + return {} + ph = ",".join("?" for _ in iface_names) + has_lp = _has_column(conn ,"Iface" ,"listen_port") + select_lp = ", i.listen_port" if has_lp else ", NULL as listen_port" + sql = f""" + SELECT i.id + , i.iface + {select_lp} + FROM Iface i + WHERE i.iface IN ({ph}) + ORDER BY i.id; + """ + rows = conn.execute(sql ,tuple(iface_names)).fetchall() + out: Dict[str ,Tuple[int ,Optional[int]]] = {} + for iid ,name ,lp in rows: + out[str(name)] = (int(iid) ,(int(lp) if lp is not None else None)) + return out + + +def _fetch_peers_for_iface( + conn: sqlite3.Connection + ,iface_id: int +) -> List[Tuple[str ,Optional[str] ,str ,int ,str ,Optional[int] ,int ,int]]: + """ + Return peers as tuples: + (public_key ,preshared_key ,endpoint_host ,endpoint_port ,allowed_ips ,keepalive_s ,priority ,id) + """ + sql = """ + SELECT public_key + , NULLIF(TRIM(preshared_key),'') as preshared_key + , endpoint_host + , endpoint_port + , allowed_ips + , keepalive_s + , priority + , id + FROM Server + WHERE iface_id = ? 
+ ORDER BY priority ASC , id ASC; + """ + rows = conn.execute(sql ,(iface_id,)).fetchall() + out: List[Tuple[str ,Optional[str] ,str ,int ,str ,Optional[int] ,int ,int]] = [] + for pub ,psk ,host ,port ,alips ,ka ,prio ,sid in rows: + out.append((str(pub) ,(str(psk) if psk is not None else None) ,str(host) ,int(port) ,str(alips) ,(int(ka) if ka is not None else None) ,int(prio) ,int(sid))) + return out + + +# ---------- rendering ---------- + +def _render_conf( + iface_name: str + ,private_key: str + ,listen_port: Optional[int] + ,peers: Sequence[Tuple[str ,Optional[str] ,str ,int ,str ,Optional[int] ,int ,int]] +) -> str: + lines: List[str] = [] + lines += [ + "[Interface]" + ,f"PrivateKey = {private_key}" + ,"Table = off" + ] + if listen_port is not None: + lines.append(f"ListenPort = {listen_port}") + else: + lines.append("# ListenPort = 51820") + + lines.append("") # blank before peers + + if not peers: + # You may choose to raise instead; keeping an empty peer set is valid but rarely useful. + lines.append("# (no peers found for this interface)") + + for pub ,psk ,host ,port ,alips ,ka ,_prio ,_sid in peers: + lines += [ + "[Peer]" + ,f"PublicKey = {pub}" + ] + if psk is not None: + lines.append(f"PresharedKey = {psk}") + lines += [ + f"AllowedIPs = {alips}" + ,f"Endpoint = {host}:{port}" + ] + if ka is not None: + lines.append(f"PersistentKeepalive = {ka}") + lines.append("") # blank line between peers + + return "\n".join(lines).rstrip() + "\n" + + +# ---------- business ---------- + +def stage_wg_conf( + conn: sqlite3.Connection + ,iface_names: Sequence[str] + ,client_machine_name: str + ,stage_root: Optional[Path] = None + ,dry_run: bool = False +) -> Tuple[List[Path] ,List[str]]: + """ + Stage /etc/wireguard/.conf for selected ifaces under stage root. 
+ """ + if not iface_names: + raise RuntimeError("no interfaces provided") + priv = _read_private_key(client_machine_name) + + meta = _fetch_iface_ids_and_ports(conn ,iface_names) + if not meta: + raise RuntimeError("none of the requested interfaces exist in DB") + + staged: List[Path] = [] + notes: List[str] = [] + sr = stage_root or STAGE_ROOT + outdir = sr / "etc" / "wireguard" + outdir.mkdir(parents=True ,exist_ok=True) + + for name in iface_names: + if name not in meta: + notes.append(f"skip: iface '{name}' missing from DB") + continue + + iface_id ,listen_port = meta[name] + peers = _fetch_peers_for_iface(conn ,iface_id) + + # basic validation of required peer fields + bad = [] + for pub ,_psk ,host ,port ,alips ,_ka ,_prio ,sid in peers: + if not pub or not host or not alips or not (1 <= int(port) <= 65535): + bad.append(sid) + if bad: + raise RuntimeError(f"iface '{name}': invalid peer rows id={bad}") + + conf_text = _render_conf(name ,priv ,listen_port ,peers) + + out = outdir / f"{name}.conf" + if dry_run: + notes.append(f"dry-run: would write {out}") + else: + out.write_text(conf_text) + out.chmod(0o600) + staged.append(out) + notes.append(f"staged: {out}") + + if not staged and not dry_run: + raise RuntimeError("nothing staged (all missing or skipped)") + + return (staged ,notes) + + +# ---------- CLI ---------- + +def main(argv=None) -> int: + ap = argparse.ArgumentParser(description="Stage minimal WireGuard configs with Table=off and no Address.") + ap.add_argument("client_machine_name" ,help="name used to read ./key/") + ap.add_argument("ifaces" ,nargs="+" ,help="interface names to stage") + ap.add_argument("--dry-run" ,action="store_true") + args = ap.parse_args(argv) + + with ic.open_db() as conn: + try: + paths ,notes = stage_wg_conf( + conn + ,args.ifaces + ,args.client_machine_name + ,dry_run=args.dry_run + ) + except Exception as e: + print(f"error: {e}" ,file=sys.stderr) + return 2 + + if notes: + print("\n".join(notes)) + return 0 + + +if 
__name__ == "__main__": + sys.exit(main()) diff --git a/developer/tunnel-client/stage_wipe.py b/developer/tunnel-client/stage_wipe.py new file mode 100755 index 0000000..9270e13 --- /dev/null +++ b/developer/tunnel-client/stage_wipe.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +# stage_wipe.py — safely wipe ./stage (keeps hidden files unless --hard) + +from __future__ import annotations +import argparse, shutil, sys +from pathlib import Path + +ROOT = Path(__file__).resolve().parent +STAGE_ROOT = ROOT / "stage" + +def wipe_stage(*, yes: bool=False, dry_run: bool=False, hard: bool=False) -> int: + """Given flags, deletes staged output. Keeps dotfiles unless hard=True.""" + st = STAGE_ROOT + if not st.exists(): + print(f"Nothing to wipe: {st} does not exist.") + return 0 + + # safety: only operate on ./stage relative to this repo folder + if st.resolve() != (ROOT / "stage").resolve(): + print(f"Refusing: unsafe STAGE path: {st}", file=sys.stderr) + return 1 + + # quick stats + try: + count = sum(1 for _ in st.rglob("*")) + except Exception: + count = 0 + + if dry_run: + print(f"DRY RUN — would wipe: {st} (items: {count})") + for p in sorted(st.iterdir()): + print(f" {p.name}") + return 0 + + if not yes: + try: + ans = input(f"Permanently delete contents of {st}? [y/N] ").strip() + except EOFError: + ans = "" + if ans.lower() not in ("y","yes"): + print("Aborted.") + return 0 + + if hard: + shutil.rmtree(st, ignore_errors=True) + print(f"Removed stage dir: {st}") + else: + # remove non-hidden entries only; keep dotfiles (e.g. 
.gitignore) + for p in st.iterdir(): + if p.name.startswith("."): + continue # preserve hidden entries + try: + if p.is_dir(): + shutil.rmtree(p, ignore_errors=True) + else: + p.unlink(missing_ok=True) + except Exception: + pass + print(f"Cleared contents of: {st} (hidden files preserved)") + return 0 + +def main(argv): + ap = argparse.ArgumentParser() + ap.add_argument("--yes", action="store_true", help="do not prompt") + ap.add_argument("--dry-run", action="store_true", help="show what would be removed") + ap.add_argument("--hard", action="store_true", help="remove the stage dir itself") + args = ap.parse_args(argv) + return wipe_stage(yes=args.yes, dry_run=args.dry_run, hard=args.hard) + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/developer/tunnel-client/start_iface.py b/developer/tunnel-client/start_iface.py new file mode 100755 index 0000000..0590d38 --- /dev/null +++ b/developer/tunnel-client/start_iface.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python3 +""" +start_iface.py + +Given: + - One or more WireGuard interface names (e.g., x6, US). + - Optional presence of systemd and wg-quick(8). + - Expected config at /etc/wireguard/.conf. + - Optional staged IP state script at /usr/local/bin/apply_ip_state.sh. + +Does: + - For each iface (best-effort, non-fatal steps): + 0) (optional) systemctl daemon-reload + 1) Start via systemd: systemctl start wg-quick@IFACE.service (unless --no-systemd) + else via wg-quick: wg-quick up IFACE (unless --no-wg-quick) + If the iface already exists and --force is given, it will attempt a + best-effort teardown then retry the start once. + 2) If started (or already present), optionally run IP state script: + /usr/local/bin/apply_ip_state.sh IFACE (unless --skip-ip-state) + - Logs each action taken or skipped. + +Returns: + - Exit 0 on success (even if some steps were no-ops); 2 on argument/privilege errors. + - Prints a concise, per-iface action log. 
+ +Errors: + - If no ifaces are provided, or if not running as root (unless --force-nonroot). + +Notes: + - This does NOT edit config files or DB; it just brings the iface up cleanly. + - Safe to re-run: “already up/exist” conditions are handled. Use --force to + tear down and recreate if needed. +""" + +from __future__ import annotations +from pathlib import Path +from typing import Iterable, List, Sequence +import argparse +import os +import shutil +import subprocess +import sys + + +# ---------- helpers ---------- + +def _run(cmd: Sequence[str]) -> tuple[int, str, str]: + """Run a command, capture stdout/stderr, return (rc, out, err).""" + try: + cp = subprocess.run(cmd, check=False, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return (cp.returncode, cp.stdout.strip(), cp.stderr.strip()) + except FileNotFoundError: + return (127, "", f"{cmd[0]}: not found") + +def _exists_iface(name: str) -> bool: + rc, _, _ = _run(["ip", "-o", "link", "show", "dev", name]) + return rc == 0 + +def _systemd_present() -> bool: + return shutil.which("systemctl") is not None + +def _wg_quick_present() -> bool: + return shutil.which("wg-quick") is not None + +def _conf_present(name: str) -> bool: + return Path(f"/etc/wireguard/{name}.conf").is_file() + +def _best_effort_teardown(name: str, logs: List[str]) -> None: + """Try to bring an iface down using systemd/wg-quick, then delete link; non-fatal.""" + unit = f"wg-quick@{name}.service" + if _systemd_present(): + rc, out, err = _run(["systemctl", "stop", unit]) + if rc == 0: + logs.append(f"systemctl: stopped {unit}") + else: + logs.append(f"systemctl: stop {unit} (ignored): {err or out or f'rc={rc}'}") + if _wg_quick_present(): + rc, out, err = _run(["wg-quick", "down", name]) + if rc == 0: + logs.append("wg-quick: down ok") + else: + logs.append(f"wg-quick: down (ignored): {err or out or f'rc={rc}'}") + if _exists_iface(name): + rc, out, err = _run(["ip", "link", "del", "dev", name]) + if rc == 0: + logs.append("ip 
link: deleted leftover device") + else: + logs.append(f"ip link: delete (ignored): {err or out or f'rc={rc}'}") + + +# ---------- business ---------- + +def start_ifaces( + ifaces: Sequence[str], + use_systemd: bool = True, + use_wg_quick: bool = True, + run_ip_state: bool = True, + ip_state_path: str = "/usr/local/bin/apply_ip_state.sh", + daemon_reload: bool = False, + force: bool = False, +) -> List[str]: + """ + Start the given WG ifaces and optionally apply IP state. + Returns a list of log lines. + """ + logs: List[str] = [] + + if not ifaces: + raise RuntimeError("no interfaces provided") + + have_systemd = _systemd_present() + have_wgquick = _wg_quick_present() + have_ipstate = Path(ip_state_path).is_file() + + if use_systemd and daemon_reload and have_systemd: + rc, _out, err = _run(["systemctl", "daemon-reload"]) + if rc == 0: + logs.append("systemctl: daemon-reload") + else: + logs.append(f"systemctl: daemon-reload (ignored): {err or f'rc={rc}'}") + + for name in ifaces: + logs.append(f"== {name} ==") + + # Ensure config exists + if not _conf_present(name): + logs.append(f"config missing: /etc/wireguard/{name}.conf (skip start)") + logs.append(f"status: absent") + logs.append("") + continue + + started = False + already_present = _exists_iface(name) + + # Optionally force recreate if device already around + if already_present and force: + logs.append("iface exists, --force given: tearing down before start") + _best_effort_teardown(name, logs) + already_present = _exists_iface(name) + + # Start via systemd or wg-quick + if use_systemd and have_systemd: + unit = f"wg-quick@{name}.service" + rc, out, err = _run(["systemctl", "start", unit]) + if rc == 0: + logs.append(f"systemctl: started {unit}") + started = True + else: + # If iface already exists, treat as running + if _exists_iface(name): + logs.append(f"systemctl: start {unit} reported error, but iface exists (continuing): {err or out or f'rc={rc}'}") + started = True + else: + logs.append(f"systemctl: 
start {unit} failed: {err or out or f'rc={rc}'}") + elif use_wg_quick and have_wgquick: + if already_present: + logs.append("wg-quick: iface already present") + started = True + else: + rc, out, err = _run(["wg-quick", "up", name]) + if rc == 0: + logs.append("wg-quick: up ok") + started = True + else: + # If iface popped up anyway, continue + if _exists_iface(name): + logs.append(f"wg-quick: up reported error, but iface exists (continuing): {err or out or f'rc={rc}'}") + started = True + else: + logs.append(f"wg-quick: up failed: {err or out or f'rc={rc}'}") + + else: + logs.append("no start method available (systemd/wg-quick disabled or not found)") + + # If requested, apply IP state post-start (useful when not using systemd drop-ins) + if run_ip_state and have_ipstate: + if _exists_iface(name): + rc, out, err = _run([ip_state_path, name]) + if rc == 0: + logs.append(f"ip-state: applied ({ip_state_path} {name})") + else: + logs.append(f"ip-state: apply failed: {err or out or f'rc={rc}'}") + else: + logs.append("ip-state: skipped (iface not present)") + + # Final status + logs.append(f"status: {'up' if _exists_iface(name) else 'down'}") + logs.append("") # spacer + + return logs + + +# ---------- CLI (wrapper only) ---------- + +def _require_root(allow_nonroot: bool) -> None: + if not allow_nonroot and os.geteuid() != 0: + raise RuntimeError("must run as root (use --force-nonroot to override)") + +def main(argv: Sequence[str] | None = None) -> int: + ap = argparse.ArgumentParser(description="Start one or more WireGuard interfaces safely.") + ap.add_argument("ifaces", nargs="+", help="interface names to start (e.g., x6 US)") + ap.add_argument("--no-systemd", action="store_true", help="do not call systemctl start wg-quick@IFACE") + ap.add_argument("--no-wg-quick", action="store_true", help="do not call wg-quick up IFACE") + ap.add_argument("--skip-ip-state", action="store_true", help="do not run apply_ip_state.sh after start") + ap.add_argument("--ip-state-path", 
default="/usr/local/bin/apply_ip_state.sh", help="path to the IP state script") + ap.add_argument("--daemon-reload", action="store_true", help="run systemctl daemon-reload before starts") + ap.add_argument("--force", action="store_true", help="if iface exists, tear down first and retry start") + ap.add_argument("--force-nonroot", action="store_true", help="allow running without root (best-effort)") + args = ap.parse_args(argv) + + try: + _require_root(allow_nonroot=args.force_nonroot) + logs = start_ifaces( + args.ifaces, + use_systemd=(not args.no_systemd), + use_wg_quick=(not args.no_wg_quick), + run_ip_state=(not args.skip_ip_state), + ip_state_path=args.ip_state_path, + daemon_reload=args.daemon_reload, + force=args.force, + ) + for line in logs: + print(line) + return 0 + except Exception as e: + print(f"error: {e}", file=sys.stderr) + return 2 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/developer/tunnel-client/stop_clean_iface.py b/developer/tunnel-client/stop_clean_iface.py new file mode 100755 index 0000000..7e6a53a --- /dev/null +++ b/developer/tunnel-client/stop_clean_iface.py @@ -0,0 +1,263 @@ +#!/usr/bin/env python3 +""" +stop_clean_iface.py + +Stop one or more WireGuard interfaces and clean IP state (rules/routes/addresses). 
+""" + +from __future__ import annotations +from pathlib import Path +from typing import Iterable, List, Optional, Sequence, Tuple, Set +import argparse +import os +import re +import shutil +import subprocess +import sys + +__VERSION__ = "1.1-agg-errors" + +RT_TABLES_FILE = Path("/etc/iproute2/rt_tables") + +# ---------- helpers (shell) ---------- + +def _run(cmd: Sequence[str], dry: bool=False) -> tuple[int, str, str]: + if dry: + return (0, "", "") + try: + cp = subprocess.run(cmd, check=False, text=True, + stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return (cp.returncode, cp.stdout.strip(), cp.stderr.strip()) + except FileNotFoundError: + return (127, "", f"{cmd[0]}: not found") + +def _exists_iface(name: str) -> bool: + rc, _, _ = _run(["ip", "-o", "link", "show", "dev", name]) + return rc == 0 + +def _systemd_present() -> bool: + return shutil.which("systemctl") is not None + +def _wg_quick_present() -> bool: + return shutil.which("wg-quick") is not None + +# ---------- helpers (routing tables & rules) ---------- + +def _rt_table_num_for_name(name: str) -> Optional[int]: + if not RT_TABLES_FILE.exists(): + return None + try: + text = RT_TABLES_FILE.read_text() + except Exception: + return None + for line in text.splitlines(): + s = line.strip() + if not s or s.startswith("#"): + continue + parts = s.split() + if len(parts) >= 2 and parts[0].isdigit(): + num = int(parts[0]); nm = parts[1] + if nm == name: + return num + return None + +_RULE_RE = re.compile(r"""^\s*(\d+):\s*(.+?)\s*$""") + +def _current_rule_lines() -> List[Tuple[int,str]]: + rc, out, _ = _run(["ip", "-4", "rule", "show"]) + if rc != 0 or not out: + return [] + rows: List[Tuple[int,str]] = [] + for ln in out.splitlines(): + m = _RULE_RE.match(ln) + if not m: + continue + pref = int(m.group(1)) + rest = m.group(2) + rows.append((pref, rest)) + return rows + +def _prefs_matching_lookups(lookups: Sequence[str]) -> Set[int]: + toks = [t for t in lookups if t] + prefs: Set[int] = set() + if not 
def _addr_del_all_v4_on_iface(iface: str, logs: List[str], dry: bool) -> None:
    """Best-effort removal of every IPv4 address assigned to *iface*.

    Failures are logged and ignored; nothing is raised.
    """
    code, listing, err = _run(["ip", "-4", "-o", "addr", "show", "dev", iface], dry=dry)
    if code != 0:
        logs.append(f"ip addr: list on {iface} (ignored): {err or f'rc={code}'}")
        return
    # In `ip -o addr` one-line output, field 4 is the CIDR (e.g. "10.8.0.2/32").
    cidrs = [
        fields[3]
        for fields in (row.split() for row in listing.splitlines())
        if len(fields) >= 4
    ]
    if not cidrs:
        logs.append("ip addr: none to remove")
        return
    for cidr in cidrs:
        del_rc, _ignored, del_err = _run(["ip", "-4", "addr", "del", cidr, "dev", iface], dry=dry)
        if del_rc == 0:
            logs.append(f"ip addr: deleted {cidr}")
        else:
            logs.append(f"ip addr: delete {cidr} (ignored): {del_err or f'rc={del_rc}'}")
def stop_clean_ifaces(
    ifaces: Sequence[str],
    use_systemd: bool = True,
    use_wg_quick: bool = True,
    do_clean: bool = True,
    aggressive: bool = False,
    dry_run: bool = False,
) -> List[str]:
    """Stop each WG iface, optionally clean its IP state, and delete the link.

    Every step is best-effort: failures are logged and the loop continues.
    Returns the accumulated log lines; raises RuntimeError on an empty list.
    """
    logs: List[str] = []
    if not ifaces:
        raise RuntimeError("no interfaces provided")

    have_systemd = _systemd_present()
    have_wgquick = _wg_quick_present()

    for iface in ifaces:
        logs.append(f"== {iface} ==")

        # Step 1: ask systemd to stop the templated unit, when requested.
        if use_systemd:
            if have_systemd:
                unit = f"wg-quick@{iface}.service"
                code, out, err = _run(["systemctl", "stop", unit], dry=dry_run)
                if code == 0:
                    logs.append(f"systemctl: stopped {unit}")
                else:
                    reason = err or out or f"rc={code}"
                    logs.append(f"systemctl: stop {unit} (ignored): {reason}")
            else:
                logs.append("systemctl: not found; skipped")

        # Step 2: wg-quick down, which also runs the conf's PostDown hooks.
        if use_wg_quick:
            if have_wgquick:
                code, out, err = _run(["wg-quick", "down", iface], dry=dry_run)
                if code == 0:
                    logs.append("wg-quick: down ok")
                else:
                    reason = err or out or f"rc={code}"
                    logs.append(f"wg-quick: down (ignored): {reason}")
            else:
                logs.append("wg-quick: not found; skipped")

        # Step 3: scrub rules/routes/addresses tied to this iface.
        if do_clean:
            _clean_iface_ip_state(iface, logs, dry=dry_run, aggressive=aggressive)
        else:
            logs.append("clean: skipped (--no-clean)")

        # Step 4: remove the device itself if anything left it behind.
        if _exists_iface(iface):
            code, out, err = _run(["ip", "link", "del", "dev", iface], dry=dry_run)
            if code == 0:
                logs.append("ip link: deleted device")
            else:
                reason = err or out or f"rc={code}"
                logs.append(f"ip link: delete (ignored): {reason}")
        else:
            logs.append("ip link: device not present; nothing to delete")

        logs.append(f"status: {'gone' if not _exists_iface(iface) else 'still present'}")
        logs.append("")

    return logs
return 0 + except Exception as e: + print(f"error: {e}", file=sys.stderr) + return 2 + +if __name__ == "__main__": + sys.exit(main(sys.argv[1:])) diff --git a/developer/tunnel-client/todo.org b/developer/tunnel-client/todo.org new file mode 100644 index 0000000..46a1a41 --- /dev/null +++ b/developer/tunnel-client/todo.org @@ -0,0 +1,73 @@ +n#+TITLE: subu / WireGuard — TODO +#+AUTHOR: Thomas & Nerith (session) +#+LANGUAGE: en +#+OPTIONS: toc:2 num:t +#+TODO: TODO(t) NEXT(n) WAITING(w) BLOCKED(b) | DONE(d) CANCELED(c) + +- Your current DB schema (the one you pasted earlier) does not include a listen-port field on Iface. So if you want ListenPort = … to be driven from the DB, add a column like Iface.listen_port INTEGER CHECK(listen_port BETWEEN 1 AND 65535). + +- have the stage commands echo relative pathnames instead of absolute as they do now. + +- the one private key pair per client (instead of per interface), turns out to be a bad idea, as we can't manage tunnels individually, say, by revoking keys. We need to move to a key pair per interface instead. + +- db_wipe needs to delete the key directory contents also + +------------------------------- + +- Known gaps / open decisions + - Systemd drop-in to call staged scripts on ~wg-quick@IFACE~ up (IPv4 addrs + policy rules). + - Staged policy-rules script (source-based + uidrange rules) to replace the old global ~IP_rule_add.sh~ usage. + - Installer flow & atomic writes (copy staged files, set owner/perms; safe update of ~/etc/iproute2/rt_tables~). + - Pool size policy: default /16 with /32 hosts is implemented; decision pending on /8 vs /16. + - Style guardrails (RT commas / two-space indent) are manual; optional linter TBD. + +* NEXT wiring (high-level order) +1) Stage: /etc/iproute2/rt_tables (merge) for selected ifaces. +2) Stage: /usr/local/bin/set_iface_ipv4_addrs.sh for same ifaces. +3) Stage: /usr/local/bin/set_policy_rules_for_ifaces.sh (new; replaces old global add tool). 
+4) Stage: systemd drop-ins for ~wg-quick@IFACE.service.d/10-postup.conf~ to call (2) then (3). +5) Install: copy staged files → system, set perms/owner; ~systemctl daemon-reload~. +6) Bring-up: ~wg-quick up IFACE~; verify routes/rules; smoke tests. + +* TODO Add “missing-iface” guard to staged IPv4 script +- When iface doesn’t exist yet, log and continue (no non-zero exit). + +* TODO Stage policy rules script (idempotent) +- For each iface: + - Source-based rule: =from lookup =. + - UID rules: =uidrange U-U lookup = for each bound UID. +- Only for ifaces passed on the CLI; DB-driven; no kernel writes here. +- Emit with checks (skip if grep finds the exact rule). + +* TODO Systemd drop-in generator +- Emit to: ~stage/etc/systemd/wg-quick@IFACE.service.d/10-postup.conf~. +- Include: + - =ExecStartPre=-/usr/sbin/ip link delete IFACE= (clean stale link). + - =ExecStartPost=+/usr/local/bin/set_iface_ipv4_addrs.sh=. + - =ExecStartPost=+/usr/local/bin/set_policy_rules_for_ifaces.sh=. + - =ExecStartPost=+/usr/bin/logger 'wg-quick@IFACE up: addrs+rules applied'=. + +* TODO Installer flow +- Copy staged files with perms (0500 for scripts; 0644 for rt_tables; 0755 for dirs). +- Atomic update for ~/etc/iproute2/rt_tables~ (write temp + move); keep timestamped backup. +- ~systemctl daemon-reload~ after installing drop-ins. + +* WAITING Decide “no-op staging” policy for rt_tables +- Option A: Always stage a copy (deterministic deployment). +- Option B: Stage only when there are new entries (quieter diffs). + +* TODO Tests +- Unit-ish: parse/plan functions for both staging scripts (dry-run cases, collisions, skip-missing cases). +- Integration: + - Create temp WG iface: ~ip link add dev t0 type wireguard~ (and delete after). + - Run staged scripts; verify ~ip -4 addr show dev t0~, ~ip rule show~, ~ip route show table ~. + - Bring up real ~wg-quick up x6~; repeat verifications. + +* TODO Docs +- Append “operational runbook” to the org manual (bring-up, verify, recover, teardown). 
def wellformed_client_keypair(conn: sqlite3.Connection, iface: str) -> bool:
    """Predicate: True iff client IFACE has a syntactically valid WG keypair.

    "Valid" here means both stored keys are 43-45 characters after stripping,
    the length range of a base64-encoded Curve25519 key.  No row -> False.
    """
    record = conn.execute(
        "SELECT private_key, public_key FROM Iface WHERE iface=? LIMIT 1;", (iface,)
    ).fetchone()
    if record is None:
        return False
    # NULL columns become "", which fails the length test below.
    keys = [(value or "").strip() for value in record]
    return all(43 <= len(key) <= 45 for key in keys)
#!/usr/bin/env bash
# set_client_key.sh — replace/set a client's public key on the server
# Usage: set_client_key.sh <client-public-key> [allowed-ips=10.8.0.2/32] [iface=wg0]
# Example: set_client_key.sh 88gTdpESSwAc0iip6tVotc8/taZErY18n3lzrgAd+XY= 10.8.0.2/32 wg0

set -euo pipefail

PUB="${1:-}"
ALLOWED="${2:-10.8.0.2/32}"
IFACE="${3:-wg0}"
CFG="/etc/wireguard/${IFACE}.conf"

# Preconditions: root, wg tooling present.
[[ $EUID -eq 0 ]] || { echo "❌ Must be run as root."; exit 1; }
command -v wg >/dev/null || { echo "❌ wg not found."; exit 1; }
command -v wg-quick >/dev/null || { echo "❌ wg-quick not found."; exit 1; }

# Fixed: usage message previously omitted the mandatory <client-public-key> argument.
[[ -n "$PUB" ]] || { echo "Usage: $0 <client-public-key> [allowed-ips] [iface]"; exit 2; }
# quick sanity on key length (base64 Curve25519 keys are 43-45 chars)
kl=${#PUB}; [[ $kl -ge 43 && $kl -le 45 ]] || { echo "❌ Public key length looks wrong."; exit 2; }
[[ -f "$CFG" ]] || { echo "❌ Config not found: $CFG"; exit 1; }

# Require the interface to be up (simplest, reliable path)
if ! wg show "$IFACE" >/dev/null 2>&1; then
  echo "❌ Interface $IFACE is not up. Start it first: wg-quick up $IFACE"
  echo "   Or stop it and edit $CFG manually (replace the peer that has AllowedIPs = $ALLOWED)."
  exit 1
fi

# Remove any existing peer that currently owns the same AllowedIPs (typical /32 per client)
while read -r oldkey oldips; do
  if [[ "$oldips" == "$ALLOWED" ]]; then
    echo "→ Removing existing peer $oldkey with AllowedIPs $ALLOWED"
    wg set "$IFACE" peer "$oldkey" remove || true
  fi
done < <(wg show "$IFACE" allowed-ips | awk '{print $1, $2}')

# Add the new peer
wg set "$IFACE" peer "$PUB" allowed-ips "$ALLOWED"

# Persist runtime state back to the config (works great even if SaveConfig=true)
wg-quick save "$IFACE"

echo "✔ Updated $IFACE: set peer $PUB with AllowedIPs $ALLOWED and saved to $CFG"
wg show "$IFACE"
SERVER_IPv4="" + +# --- Keys (server) --- +if [[ ! -f "$WG_DIR/server.key" ]]; then + (umask 077; wg genkey | tee "$WG_DIR/server.key" | wg pubkey > "$WG_DIR/server.pub") + chmod 600 "$WG_DIR/server.key" +fi +SERVER_PRIV=$(cat "$WG_DIR/server.key") +SERVER_PUB=$(cat "$WG_DIR/server.pub") + +# --- Keys (client) --- +if [[ ! -f "$CLIENT_DIR/${CLIENT_NAME}.key" ]]; then + (umask 077; wg genkey | tee "$CLIENT_DIR/${CLIENT_NAME}.key" | wg pubkey > "$CLIENT_DIR/${CLIENT_NAME}.pub") + chmod 600 "$CLIENT_DIR/${CLIENT_NAME}.key" +fi +CLIENT_PRIV=$(cat "$CLIENT_DIR/${CLIENT_NAME}.key") +CLIENT_PUB=$(cat "$CLIENT_DIR/${CLIENT_NAME}.pub") + +# --- IPv4 forwarding --- +install -d -m 0755 /etc/sysctl.d +cat > /etc/sysctl.d/99-wireguard-forwarding.conf <<'EOF' +net.ipv4.ip_forward=1 +# net.ipv6.conf.all.forwarding=1 +EOF +sysctl --system >/dev/null + +# --- Write server config (backup if existing) --- +CFG="$WG_DIR/${WG_IF}.conf" +if [[ -f "$CFG" ]]; then + cp -a "$CFG" "$CFG.bak.$(date -u +%Y%m%dT%H%M%SZ)" +fi +cat > "$CFG" < "$CLIENT_CFG" </dev/null 2>&1 && ufw status | grep -q "Status: active"; then + ufw status | grep -q "^${WG_PORT}/udp" || ufw allow "${WG_PORT}/udp" || true +fi + +# --- Enable interface --- +run systemctl enable --now wg-quick@"$WG_IF" + +# --- Status + QR --- +echo +wg show "$WG_IF" || true +echo +echo "Client file: $CLIENT_CFG" +command -v qrencode >/dev/null 2>&1 && { echo "QR (WireGuard mobile import):"; qrencode -t ansiutf8 < "$CLIENT_CFG"; } +echo +echo "If Endpoint autodetection is wrong, edit it to your public IP or DNS."