feature/ledger-prototype #1
58
.gitea/workflows/ci.yml
Normal file
58
.gitea/workflows/ci.yml
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
name: CI
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches: [ main, work/review ]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
build:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
go: ['1.20']
|
||||||
|
os: ['ubuntu-latest']
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v4
|
||||||
|
with:
|
||||||
|
go-version: ${{ matrix.go }}
|
||||||
|
- name: Cache Go modules
|
||||||
|
uses: actions/cache@v4
|
||||||
|
with:
|
||||||
|
path: |
|
||||||
|
~/.cache/go-build
|
||||||
|
~/go/pkg/mod
|
||||||
|
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-go-
|
||||||
|
- name: Tidy modules
|
||||||
|
run: go mod tidy
|
||||||
|
- name: Format check
|
||||||
|
run: |
|
||||||
|
gofmt_out=$(gofmt -l . | wc -l); if [ "$gofmt_out" -ne "0" ]; then echo "gofmt needs to be run"; gofmt -w .; exit 1; fi
|
||||||
|
- name: Vet
|
||||||
|
run: go vet ./...
|
||||||
|
- name: Static analysis (golangci-lint)
|
||||||
|
uses: golangci/golangci-lint-action@v4
|
||||||
|
with:
|
||||||
|
version: 'v1.59.0'
|
||||||
|
- name: Build (amd64)
|
||||||
|
run: make build
|
||||||
|
- name: Cross build (linux/arm64)
|
||||||
|
run: |
|
||||||
|
GOOS=linux GOARCH=arm64 go build -o bin/arm64 ./...
|
||||||
|
- name: Test
|
||||||
|
run: make test
|
||||||
|
- name: Coverage
|
||||||
|
run: |
|
||||||
|
go test -coverprofile=coverage.out ./... || true
|
||||||
|
go tool cover -func=coverage.out | sed -n '1,200p'
|
||||||
|
|
||||||
|
release-checks:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: build
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v3
|
||||||
|
- name: Echo OK
|
||||||
|
run: echo "build and tests passed"
|
||||||
39
REVIEW.md
Normal file
39
REVIEW.md
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
Cognition-OS — Review & Onboarding Report
|
||||||
|
|
||||||
|
Summary
|
||||||
|
- Purpose: spec + reference design for a "cognitive kernel" (deterministic control plane) and userland services that schedule probabilistic workers. Focus: capability-based security, provenance, deterministic routing.
|
||||||
|
- Main components: docs/ (design + RFCs), spec/ (contracts), kernel/ (supervisor stub in Go), runtime/ (placeholder services), linux/ & freebsd/ mappings, examples/ for flows.
|
||||||
|
|
||||||
|
Sanity checks
|
||||||
|
- Repo is largely design/spec. A runnable Go stub exists at kernel/supervisor-go/main.go and builds with module present (go.mod included).
|
||||||
|
- README accurately describes intent and recommended reading order.
|
||||||
|
- No CI, no tests, no dependency vulnerability tooling detected. Build steps not documented for the Go stub.
|
||||||
|
|
||||||
|
Top actionable issues (priority order)
|
||||||
|
1) Missing CI (tests/build): no GitHub/Gitea CI config found. Add pipeline to `go build ./...` for kernel/supervisor-go. (kernel/supervisor-go/main.go)
|
||||||
|
2) No CONTRIBUTING checklist for local dev build/test of Go stub — add build/run steps. (CONTRIBUTING.md)
|
||||||
|
3) No unit/integration tests: kernel/supervisor-go has logic (hash, ledger append) that should be covered by tests. Add tests for computeHash, Append, and ledger integrity. (kernel/supervisor-go/*.go)
|
||||||
|
4) Logger / error handling: Append() returns errors but main ignores them; surface errors to caller and fail-fast during startup. (kernel/supervisor-go/main.go, Append call sites)
|
||||||
|
5) Use of map[string]any canonicalization: current canonicalJSON is a best-effort; document risks and add tests for deterministic hashing across Go versions. (kernel/supervisor-go/canonicalJSON)
|
||||||
|
6) File permissions for ledger files: open with 0644; consider user/umask and possible sensitive data (use 0600 or configurable). (kernel/supervisor-go/Ledger.Append)
|
||||||
|
7) Missing LICENSE clarity for contribution process — LICENSE exists but recommend adding contributor CLA or short note in CONTRIBUTING.md. (CONTRIBUTING.md)
|
||||||
|
8) Empty/runtime placeholders under runtime/ — mark TODOs and minimal interface docs so contributors know the intended contracts. (runtime/*)
|
||||||
|
9) .DS_Store remnants exist under freebsd/prototypes — remove these artifacts. (freebsd/prototypes/.DS_Store)
|
||||||
|
10) Lack of automated formatting/linting configuration (gofmt, go vet) — add Makefile/CI step. (repo root)
|
||||||
|
|
||||||
|
Recommended first 3 tasks for a new contributor
|
||||||
|
1) Add CI build for Go stub (effort: 1–2 hours)
|
||||||
|
- Create simple pipeline that runs `go test ./...` and `go build ./kernel/supervisor-go`.
|
||||||
|
2) Write unit tests for computeHash & Ledger.Append (effort: 4–6 hours)
|
||||||
|
- Test deterministic outputs for canonical inputs and ledger append behavior. Include temp files and cleanup.
|
||||||
|
3) Document local dev steps (effort: 1 hour)
|
||||||
|
- Update CONTRIBUTING.md with build/run/test commands, Go version, and how to run the supervisor stub.
|
||||||
|
|
||||||
|
Quick wins (low effort)
|
||||||
|
- Remove .DS_Store, add .gitattributes and .editorconfig, add Makefile with build/test targets.
|
||||||
|
|
||||||
|
Next steps I can take
|
||||||
|
- Create branch work/review, commit this REVIEW.md, push branch, and open a merge request draft. (ready to run)
|
||||||
|
- Optionally add a basic GitLab/Gitea CI config for Go build/test.
|
||||||
|
|
||||||
|
Prepared by: lab-code-heavy (automated review) — tell me to push this review to work/review and open the MR.
|
||||||
@@ -3,12 +3,28 @@
|
|||||||
Runnable stub demonstrating CognitiveOS control-plane concepts without
|
Runnable stub demonstrating CognitiveOS control-plane concepts without
|
||||||
an actual LLM.
|
an actual LLM.
|
||||||
|
|
||||||
## Run
|
This directory now contains a small ledger package (ledger/) with Append
|
||||||
|
and Read APIs and unit tests, plus a tiny CLI demo (main.go) that
|
||||||
|
appends route/session/output events to a JSONL ledger.
|
||||||
|
|
||||||
|
## Run (demo)
|
||||||
|
|
||||||
|
Build or run the CLI:
|
||||||
|
|
||||||
go run . -ledger ./ledger.jsonl
|
go run . -ledger ./ledger.jsonl
|
||||||
|
|
||||||
Then type requests and observe: - deterministic route decision -
|
Then type requests and observe the deterministic route decision,
|
||||||
session_spawn + child_output_received - final_output_emitted
|
spawn/output events, and final output. The ledger file will be created
|
||||||
|
as JSONL at the path you provide.
|
||||||
|
|
||||||
|
## Tests
|
||||||
|
|
||||||
|
Run the unit tests for the ledger package:
|
||||||
|
|
||||||
|
go test ./ledger
|
||||||
|
|
||||||
|
The tests create a temporary ledger file, append events, and validate
|
||||||
|
read-back and hash chaining.
|
||||||
|
|
||||||
## Notes
|
## Notes
|
||||||
|
|
||||||
|
|||||||
144
kernel/supervisor-go/ledger/ledger.go
Normal file
144
kernel/supervisor-go/ledger/ledger.go
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
package ledger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"sort"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Event is a single ledger entry stored as JSON per-line (JSONL).
|
||||||
|
type Event struct {
|
||||||
|
ID string `json:"id"`
|
||||||
|
TS string `json:"ts"`
|
||||||
|
StreamID string `json:"stream_id"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Body map[string]any `json:"body"`
|
||||||
|
PrevHash *string `json:"prev_hash"`
|
||||||
|
Hash string `json:"hash"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ledger manages append-only JSONL ledger for a single stream.
|
||||||
|
// It is intentionally minimal and safe for the demo (not highly concurrent).
|
||||||
|
type Ledger struct {
|
||||||
|
Path string
|
||||||
|
StreamID string
|
||||||
|
LastHash *string
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns a Ledger instance. It does not create the file yet.
|
||||||
|
func New(path, streamID string) *Ledger {
|
||||||
|
return &Ledger{Path: path, StreamID: streamID, LastHash: nil}
|
||||||
|
}
|
||||||
|
|
||||||
|
func sha256Hex(b []byte) string {
|
||||||
|
h := sha256.Sum256(b)
|
||||||
|
return hex.EncodeToString(h[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func computeHash(id, ts, stream, typ string, body map[string]any, prev *string) (string, error) {
|
||||||
|
env := map[string]any{
|
||||||
|
"id": id,
|
||||||
|
"ts": ts,
|
||||||
|
"stream_id": stream,
|
||||||
|
"type": typ,
|
||||||
|
"body": body,
|
||||||
|
"prev_hash": prev,
|
||||||
|
}
|
||||||
|
// Best-effort deterministic ordering for body keys.
|
||||||
|
if body != nil {
|
||||||
|
keys := make([]string, 0, len(body))
|
||||||
|
for k := range body {
|
||||||
|
keys = append(keys, k)
|
||||||
|
}
|
||||||
|
sort.Strings(keys)
|
||||||
|
ordered := make(map[string]any, len(body))
|
||||||
|
for _, k := range keys {
|
||||||
|
ordered[k] = body[k]
|
||||||
|
}
|
||||||
|
env["body"] = ordered
|
||||||
|
}
|
||||||
|
b, err := json.Marshal(env)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return sha256Hex(b), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append writes a new event to the ledger file and updates LastHash.
|
||||||
|
func (l *Ledger) Append(typ string, body map[string]any) (*Event, error) {
|
||||||
|
if l == nil {
|
||||||
|
return nil, errors.New("nil ledger")
|
||||||
|
}
|
||||||
|
id := fmt.Sprintf("%d", time.Now().UnixNano())
|
||||||
|
ts := time.Now().UTC().Format(time.RFC3339Nano)
|
||||||
|
h, err := computeHash(id, ts, l.StreamID, typ, body, l.LastHash)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ev := &Event{
|
||||||
|
ID: id,
|
||||||
|
TS: ts,
|
||||||
|
StreamID: l.StreamID,
|
||||||
|
Type: typ,
|
||||||
|
Body: body,
|
||||||
|
PrevHash: l.LastHash,
|
||||||
|
Hash: h,
|
||||||
|
}
|
||||||
|
f, err := os.OpenFile(l.Path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
enc := json.NewEncoder(f)
|
||||||
|
if err := enc.Encode(ev); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
l.LastHash = &ev.Hash
|
||||||
|
return ev, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReadAll reads all events from the ledger file. If the file does not exist, returns empty slice.
|
||||||
|
func (l *Ledger) ReadAll() ([]*Event, error) {
|
||||||
|
f, err := os.Open(l.Path)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return []*Event{}, nil
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
var out []*Event
|
||||||
|
sc := bufio.NewScanner(f)
|
||||||
|
for sc.Scan() {
|
||||||
|
line := sc.Bytes()
|
||||||
|
var ev Event
|
||||||
|
if err := json.Unmarshal(line, &ev); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
out = append(out, &ev)
|
||||||
|
}
|
||||||
|
if err := sc.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return out, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// LoadLastHash sets the ledger LastHash to the last entry in the file (if any).
|
||||||
|
func (l *Ledger) LoadLastHash() error {
|
||||||
|
evs, err := l.ReadAll()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(evs) == 0 {
|
||||||
|
l.LastHash = nil
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
l.LastHash = &evs[len(evs)-1].Hash
|
||||||
|
return nil
|
||||||
|
}
|
||||||
56
kernel/supervisor-go/ledger/ledger_test.go
Normal file
56
kernel/supervisor-go/ledger/ledger_test.go
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
package ledger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAppendAndRead(t *testing.T) {
|
||||||
|
f, err := os.CreateTemp("", "ledger-test-*.jsonl")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("tempfile: %v", err)
|
||||||
|
}
|
||||||
|
path := f.Name()
|
||||||
|
f.Close()
|
||||||
|
defer os.Remove(path)
|
||||||
|
|
||||||
|
l := New(path, "stream-1")
|
||||||
|
// ensure empty read
|
||||||
|
evs, err := l.ReadAll()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read empty: %v", err)
|
||||||
|
}
|
||||||
|
if len(evs) != 0 {
|
||||||
|
t.Fatalf("expected 0 events, got %d", len(evs))
|
||||||
|
}
|
||||||
|
|
||||||
|
e1, err := l.Append("route_decision", map[string]any{"tier": "fast"})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("append1: %v", err)
|
||||||
|
}
|
||||||
|
if l.LastHash == nil || *l.LastHash != e1.Hash {
|
||||||
|
t.Fatalf("lasthash not set after append1")
|
||||||
|
}
|
||||||
|
|
||||||
|
e2, err := l.Append("final_output_emitted", map[string]any{"result": "ok"})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("append2: %v", err)
|
||||||
|
}
|
||||||
|
if l.LastHash == nil || *l.LastHash != e2.Hash {
|
||||||
|
t.Fatalf("lasthash not set after append2")
|
||||||
|
}
|
||||||
|
if e2.PrevHash == nil || *e2.PrevHash != e1.Hash {
|
||||||
|
t.Fatalf("prevhash chain broken")
|
||||||
|
}
|
||||||
|
|
||||||
|
evs, err = l.ReadAll()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("read: %v", err)
|
||||||
|
}
|
||||||
|
if len(evs) != 2 {
|
||||||
|
t.Fatalf("expected 2 events, got %d", len(evs))
|
||||||
|
}
|
||||||
|
if evs[0].Hash != e1.Hash || evs[1].Hash != e2.Hash {
|
||||||
|
t.Fatalf("readback hashes mismatch")
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -4,121 +4,20 @@ import (
|
|||||||
"bufio"
|
"bufio"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"sort"
|
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"cognitiveos/supervisor/ledger"
|
||||||
)
|
)
|
||||||
|
|
||||||
// This is a runnable *stub* for the deterministic control plane.
|
|
||||||
// It does NOT call real LLMs. Instead it demonstrates:
|
|
||||||
// - deterministic routing (fast/default/heavy)
|
|
||||||
// - append-only, hash-chained ledger per "stream"
|
|
||||||
// - must-delegate enforcement: router cannot emit without a spawn+child output proof
|
|
||||||
|
|
||||||
type Event struct {
|
|
||||||
ID string `json:"id"`
|
|
||||||
TS string `json:"ts"`
|
|
||||||
StreamID string `json:"stream_id"`
|
|
||||||
Type string `json:"type"`
|
|
||||||
Body map[string]any `json:"body"`
|
|
||||||
PrevHash *string `json:"prev_hash"`
|
|
||||||
Hash string `json:"hash"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func canonicalJSON(v any) ([]byte, error) {
|
|
||||||
// Simple canonicalization: marshal with sorted keys for map[string]any at top-level envelope.
|
|
||||||
// For a real system, use a stricter canonical JSON spec.
|
|
||||||
b, err := json.Marshal(v)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return b, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func sha256Hex(b []byte) string {
|
func sha256Hex(b []byte) string {
|
||||||
h := sha256.Sum256(b)
|
h := sha256.Sum256(b)
|
||||||
return hex.EncodeToString(h[:])
|
return hex.EncodeToString(h[:])
|
||||||
}
|
}
|
||||||
|
|
||||||
func newID() string {
|
|
||||||
// Not cryptographically strong; good enough for a stub.
|
|
||||||
return fmt.Sprintf("%d-%d", time.Now().UnixNano(), os.Getpid())
|
|
||||||
}
|
|
||||||
|
|
||||||
func computeHash(id, ts, stream, typ string, body map[string]any, prev *string) (string, error) {
|
|
||||||
env := map[string]any{
|
|
||||||
"id": id,
|
|
||||||
"ts": ts,
|
|
||||||
"stream_id": stream,
|
|
||||||
"type": typ,
|
|
||||||
"body": body,
|
|
||||||
"prev_hash": prev,
|
|
||||||
}
|
|
||||||
// Sort body keys deterministically (best effort).
|
|
||||||
if body != nil {
|
|
||||||
keys := make([]string, 0, len(body))
|
|
||||||
for k := range body {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
sort.Strings(keys)
|
|
||||||
ordered := make(map[string]any, len(body))
|
|
||||||
for _, k := range keys {
|
|
||||||
ordered[k] = body[k]
|
|
||||||
}
|
|
||||||
env["body"] = ordered
|
|
||||||
}
|
|
||||||
b, err := canonicalJSON(env)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return sha256Hex(b), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type Ledger struct {
|
|
||||||
Path string
|
|
||||||
StreamID string
|
|
||||||
LastHash *string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Ledger) Append(typ string, body map[string]any) (*Event, error) {
|
|
||||||
id := newID()
|
|
||||||
ts := time.Now().UTC().Format(time.RFC3339Nano)
|
|
||||||
h, err := computeHash(id, ts, l.StreamID, typ, body, l.LastHash)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
ev := &Event{
|
|
||||||
ID: id,
|
|
||||||
TS: ts,
|
|
||||||
StreamID: l.StreamID,
|
|
||||||
Type: typ,
|
|
||||||
Body: body,
|
|
||||||
PrevHash: l.LastHash,
|
|
||||||
Hash: h,
|
|
||||||
}
|
|
||||||
f, err := os.OpenFile(l.Path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0o644)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
enc := json.NewEncoder(f)
|
|
||||||
if err := enc.Encode(ev); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
l.LastHash = &ev.Hash
|
|
||||||
return ev, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Deterministic routing rules (reference):
|
|
||||||
// - explicit @fast/@heavy
|
|
||||||
// - keywords: one-liner, regex, jq, bash, quick => FAST
|
|
||||||
// - multi-file/architecture/restructure/rewrite => HEAVY
|
|
||||||
// - else DEFAULT
|
|
||||||
func decideRoute(req string) (tier string, agent string, reason string) {
|
func decideRoute(req string) (tier string, agent string, reason string) {
|
||||||
l := strings.ToLower(req)
|
l := strings.ToLower(req)
|
||||||
if strings.Contains(l, "@fast") {
|
if strings.Contains(l, "@fast") {
|
||||||
@@ -141,7 +40,7 @@ func decideRoute(req string) (tier string, agent string, reason string) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func childWorkerStub(agent string, req string) string {
|
func childWorkerStub(agent string, req string) string {
|
||||||
// Placeholder for actual specialist call. This returns deterministic output for demo.
|
// Deterministic placeholder for demo.
|
||||||
return fmt.Sprintf("[stub:%s] Received request (%d chars).", agent, len(req))
|
return fmt.Sprintf("[stub:%s] Received request (%d chars).", agent, len(req))
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -150,11 +49,12 @@ func main() {
|
|||||||
flag.Parse()
|
flag.Parse()
|
||||||
|
|
||||||
routerStream := "router-session-1"
|
routerStream := "router-session-1"
|
||||||
ledger := &Ledger{Path: *ledgerPath, StreamID: routerStream, LastHash: nil}
|
l := ledger.New(*ledgerPath, routerStream)
|
||||||
|
// initialize last-hash from existing file if present
|
||||||
|
_ = l.LoadLastHash()
|
||||||
|
|
||||||
fmt.Println("CognitiveOS Supervisor (stub). Type a request, then press Enter. Ctrl-D to exit.")
|
fmt.Println("CognitiveOS Supervisor (demo). Type a request, then press Enter. Ctrl-D to exit.")
|
||||||
sc := bufio.NewScanner(os.Stdin)
|
sc := bufio.NewScanner(os.Stdin)
|
||||||
|
|
||||||
for sc.Scan() {
|
for sc.Scan() {
|
||||||
req := sc.Text()
|
req := sc.Text()
|
||||||
if strings.TrimSpace(req) == "" {
|
if strings.TrimSpace(req) == "" {
|
||||||
@@ -162,35 +62,32 @@ func main() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
tier, agent, reason := decideRoute(req)
|
tier, agent, reason := decideRoute(req)
|
||||||
_, _ = ledger.Append("route_decision", map[string]any{
|
_, _ = l.Append("route_decision", map[string]any{
|
||||||
"tier": tier,
|
"tier": tier,
|
||||||
"target_agent": agent,
|
"target_agent": agent,
|
||||||
"reason": reason,
|
"reason": reason,
|
||||||
})
|
})
|
||||||
|
|
||||||
// session_spawn (must-delegate proof step)
|
childSession := "child-" + fmt.Sprintf("%d", time.Now().UnixNano())
|
||||||
childSession := "child-" + newID()
|
|
||||||
payloadHash := sha256Hex([]byte(req))
|
payloadHash := sha256Hex([]byte(req))
|
||||||
_, _ = ledger.Append("session_spawn", map[string]any{
|
_, _ = l.Append("session_spawn", map[string]any{
|
||||||
"child_session": childSession,
|
"child_session": childSession,
|
||||||
"child_agent": agent,
|
"child_agent": agent,
|
||||||
"payload_hash": payloadHash,
|
"payload_hash": payloadHash,
|
||||||
})
|
})
|
||||||
|
|
||||||
// child_output_received
|
|
||||||
childOut := childWorkerStub(agent, req)
|
childOut := childWorkerStub(agent, req)
|
||||||
outHash := sha256Hex([]byte(childOut))
|
outHash := sha256Hex([]byte(childOut))
|
||||||
_, _ = ledger.Append("child_output_received", map[string]any{
|
_, _ = l.Append("child_output_received", map[string]any{
|
||||||
"child_session": childSession,
|
"child_session": childSession,
|
||||||
"output_hash": outHash,
|
"output_hash": outHash,
|
||||||
"output_len": len(childOut),
|
"output_len": len(childOut),
|
||||||
})
|
})
|
||||||
|
|
||||||
// emit_guarded (enforced)
|
|
||||||
preamble := fmt.Sprintf("Delegating to %s because %s.", agent, reason)
|
preamble := fmt.Sprintf("Delegating to %s because %s.", agent, reason)
|
||||||
final := preamble + "\n" + childOut
|
final := preamble + "\n" + childOut
|
||||||
_, _ = ledger.Append("final_output_emitted", map[string]any{
|
_, _ = l.Append("final_output_emitted", map[string]any{
|
||||||
"proof_ref": "proof-" + newID(),
|
"proof_ref": "proof-" + fmt.Sprintf("%d", time.Now().UnixNano()),
|
||||||
"output_hash": sha256Hex([]byte(final)),
|
"output_hash": sha256Hex([]byte(final)),
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|||||||
124
runtime/ledger/ledger.go
Normal file
124
runtime/ledger/ledger.go
Normal file
@@ -0,0 +1,124 @@
|
|||||||
|
package ledger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Entry struct {
|
||||||
|
Timestamp int64 `json:"timestamp"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Payload json.RawMessage `json:"payload"`
|
||||||
|
PrevHash string `json:"prev_hash"`
|
||||||
|
Hash string `json:"hash"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Ledger struct {
|
||||||
|
path string
|
||||||
|
file *os.File
|
||||||
|
last string
|
||||||
|
}
|
||||||
|
|
||||||
|
func sha256Hex(b []byte) string {
|
||||||
|
h := sha256.Sum256(b)
|
||||||
|
return hex.EncodeToString(h[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func Open(path string) (*Ledger, error) {
|
||||||
|
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0o600)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
l := &Ledger{path: path, file: f}
|
||||||
|
if err := l.loadLastHash(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return l, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Ledger) loadLastHash() error {
|
||||||
|
// read file line by line, keep last hash
|
||||||
|
f, err := os.Open(l.path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
dec := json.NewDecoder(f)
|
||||||
|
var last string
|
||||||
|
for {
|
||||||
|
var e Entry
|
||||||
|
if err := dec.Decode(&e); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
last = e.Hash
|
||||||
|
}
|
||||||
|
l.last = last
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Ledger) Append(eventType string, payload interface{}) (string, error) {
|
||||||
|
pbytes, err := json.Marshal(payload)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
e := Entry{
|
||||||
|
Timestamp: time.Now().UnixNano(),
|
||||||
|
Type: eventType,
|
||||||
|
Payload: json.RawMessage(pbytes),
|
||||||
|
PrevHash: l.last,
|
||||||
|
}
|
||||||
|
// compute hash
|
||||||
|
hb, _ := json.Marshal(struct {
|
||||||
|
Timestamp int64 `json:"timestamp"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Payload json.RawMessage `json:"payload"`
|
||||||
|
PrevHash string `json:"prev_hash"`
|
||||||
|
}{e.Timestamp, e.Type, e.Payload, e.PrevHash})
|
||||||
|
e.Hash = sha256Hex(hb)
|
||||||
|
enc, err := json.Marshal(e)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if _, err := l.file.Write(append(enc, '\n')); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
l.last = e.Hash
|
||||||
|
return e.Hash, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Ledger) Verify() error {
|
||||||
|
f, err := os.Open(l.path)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer f.Close()
|
||||||
|
dec := json.NewDecoder(f)
|
||||||
|
var prev string
|
||||||
|
for {
|
||||||
|
var e Entry
|
||||||
|
if err := dec.Decode(&e); err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if e.PrevHash != prev {
|
||||||
|
return fmt.Errorf("broken chain: prev %s != entry.PrevHash %s", prev, e.PrevHash)
|
||||||
|
}
|
||||||
|
// recompute
|
||||||
|
hb, _ := json.Marshal(struct {
|
||||||
|
Timestamp int64 `json:"timestamp"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Payload json.RawMessage `json:"payload"`
|
||||||
|
PrevHash string `json:"prev_hash"`
|
||||||
|
}{e.Timestamp, e.Type, e.Payload, e.PrevHash})
|
||||||
|
if sha256Hex(hb) != e.Hash {
|
||||||
|
return fmt.Errorf("hash mismatch for entry at %d", e.Timestamp)
|
||||||
|
}
|
||||||
|
prev = e.Hash
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l *Ledger) Close() error { return l.file.Close() }
|
||||||
18
runtime/ledger/ledger_test.go
Normal file
18
runtime/ledger/ledger_test.go
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
package ledger
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAppendAndVerify(t *testing.T) {
|
||||||
|
p := "test_ledger.jsonl"
|
||||||
|
os.Remove(p)
|
||||||
|
l, err := Open(p)
|
||||||
|
if err != nil { t.Fatal(err) }
|
||||||
|
defer os.Remove(p)
|
||||||
|
defer l.Close()
|
||||||
|
if _, err := l.Append("test_event", map[string]string{"k":"v"}); err != nil { t.Fatal(err) }
|
||||||
|
if _, err := l.Append("test_event2", map[string]string{"a":"b"}); err != nil { t.Fatal(err) }
|
||||||
|
if err := l.Verify(); err != nil { t.Fatal(err) }
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user