
first commit

ajasibley 6 months ago
commit 1fea9a5fb0
11 changed files with 1131 additions and 0 deletions
  1. .gitignore  +12 -0
  2. LICENSE  +7 -0
  3. README.md  +142 -0
  4. chat.go  +97 -0
  5. dns.go  +209 -0
  6. go.mod  +7 -0
  7. go.sum  +6 -0
  8. http.go  +183 -0
  9. openai.go  +147 -0
  10. ssh.go  +203 -0
  11. util.go  +118 -0

+ 12 - 0
.gitignore

@@ -0,0 +1,12 @@
+# User-provided LLM backend
+llm.go
+
+# Compiled binaries
+chat
+selftest
+
+# SSH host key
+ssh_host_key
+
+# OS files
+.DS_Store

+ 7 - 0
LICENSE

@@ -0,0 +1,7 @@
+Copyright 2025 Deep AI, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 142 - 0
README.md

@@ -0,0 +1,142 @@
+# ch.at - Universal Basic Chat
+
+Minimalist LLM chat accessible through HTTP, SSH, DNS, and API. One binary, no JavaScript, no tracking.
+
+## Usage
+
+```bash
+# Web (no JavaScript)
+open https://ch.at
+
+# Terminal
+curl "ch.at/?q=hello"
+ssh ch.at
+
+# DNS tunneling
+dig what-is-2+2.ch.at TXT
+
+# API (OpenAI-compatible)
+curl ch.at:8080/v1/chat/completions
+```
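+
+The API endpoint expects a POST with an OpenAI-style JSON body. A minimal request (the model name is passed through to the backend):
+
+```bash
+curl ch.at:8080/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{"model": "gpt-4o", "messages": [{"role": "user", "content": "hello"}]}'
+```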
+
+## Design
+
+- ~1,100 lines of Go, one external dependency
+- Single static binary
+- No accounts, no logs, no tracking
+- Suckless-style configuration (edit source)
+
+## Privacy
+
+We take a "can't be evil" approach:
+
+- No authentication or user tracking
+- No server-side conversation storage
+- No logs whatsoever
+- Web history stored client-side only
+
+**⚠️ PRIVACY WARNING**: Your queries are sent to LLM providers (OpenAI, Anthropic, etc.) who may log and store them according to their policies. While ch.at doesn't log anything, the upstream providers might. Never send passwords, API keys, or sensitive information.
+
+**Current Production Model**: OpenAI's GPT-4o. We plan to expand model access in the future.
+
+## Installation
+
+Create `llm.go` (gitignored):
+
+```go
+// llm.go - Create this file (it's gitignored)
+package main
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+)
+
+func getLLMResponse(ctx context.Context, prompt string) (string, error) {
+	var response strings.Builder
+	stream, err := getLLMResponseStream(ctx, prompt)
+	if err != nil {
+		return "", err
+	}
+	for chunk := range stream {
+		response.WriteString(chunk)
+	}
+	return response.String(), nil
+}
+
+func getLLMResponseStream(ctx context.Context, prompt string) (<-chan string, error) {
+	endpoint := "https://api.openai.com/v1/chat/completions"
+	key := "YOUR-OPENAI-API-KEY-HERE"  // Replace with your key
+	
+	payload := map[string]interface{}{
+		"model": "gpt-4o",
+		"messages": []map[string]string{
+			{"role": "user", "content": prompt},
+		},
+		"stream": true,
+	}
+	
+	// ... rest of implementation
+}
+```
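+
+The elided body can be completed along these lines. This is only a sketch, assuming the OpenAI streaming (SSE) response format; adapt it to your provider and drop any imports you do not end up using (e.g. `io`):
+
+```go
+	// ...continuing inside getLLMResponseStream:
+	body, err := json.Marshal(payload)
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := http.NewRequestWithContext(ctx, "POST", endpoint, bytes.NewReader(body))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("Authorization", "Bearer "+key)
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	if resp.StatusCode != http.StatusOK {
+		resp.Body.Close()
+		return nil, fmt.Errorf("upstream error: %s", resp.Status)
+	}
+
+	out := make(chan string)
+	go func() {
+		defer close(out)
+		defer resp.Body.Close()
+
+		// The stream is Server-Sent Events: "data: {json}" lines ending with "data: [DONE]"
+		scanner := bufio.NewScanner(resp.Body)
+		for scanner.Scan() {
+			line := strings.TrimSpace(scanner.Text())
+			if !strings.HasPrefix(line, "data: ") {
+				continue
+			}
+			data := strings.TrimPrefix(line, "data: ")
+			if data == "[DONE]" {
+				return
+			}
+			var chunk struct {
+				Choices []struct {
+					Delta struct {
+						Content string `json:"content"`
+					} `json:"delta"`
+				} `json:"choices"`
+			}
+			if err := json.Unmarshal([]byte(data), &chunk); err != nil {
+				continue
+			}
+			if len(chunk.Choices) > 0 && chunk.Choices[0].Delta.Content != "" {
+				out <- chunk.Choices[0].Delta.Content
+			}
+		}
+	}()
+
+	return out, nil
+}
+```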
+
+Then build:
+```bash
+go build -o chat .
+sudo ./chat  # Needs root for ports 80/443/53/22
+
+# Optional: build selftest tool
+go build -o selftest ./cmd/selftest
+```
+
+To run on high ports, edit the constants in `chat.go` and rebuild:
+```go
+const (
+    HTTP_PORT   = 8080  // Instead of 80
+    HTTPS_PORT  = 0     // Disabled
+    SSH_PORT    = 2222  // Instead of 22
+    DNS_PORT    = 0     // Disabled
+    OPENAI_PORT = 8080  // Same as HTTP
+)
+```
+
+Then:
+```bash
+go build -o chat .
+./chat  # No sudo needed for high ports
+
+# Test the service
+./selftest http://localhost:8080
+```
+
+## Configuration
+
+Edit constants in source files:
+- Ports: `chat.go` (set to 0 to disable)
+- Rate limits: `util.go` (see the example below)
+- Remove protocol: Delete its .go file
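+
+The limiter is a single global in `util.go` (shown here with its defaults); change the two arguments and rebuild:
+
+```go
+var rateLimiter = NewRateLimiter(100, time.Minute) // 100 requests per minute per IP
+```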
+
+## Limitations
+
+- **DNS**: Responses limited to ~500 chars due to protocol constraints
+- **History**: Limited to 2KB in the web interface to keep pages and requests small
+- **Rate limiting**: Basic IP-based limiting to prevent abuse
+- **Encryption**: SSH and HTTPS are encrypted; plain HTTP and DNS are not
+
+## License
+
+MIT License - see LICENSE file
+
+## Contributing
+
+Before adding features:
+- Does it increase accessibility?
+- Is it under 50 lines?
+- Is it necessary?
+

+ 97 - 0
chat.go

@@ -0,0 +1,97 @@
+package main
+
+import (
+	"fmt"
+	"os"
+)
+
+// Configuration - edit and recompile to change (Suckless style)
+// To disable a service: set its port to 0 or delete its .go file
+const (
+	HTTP_PORT   = 80    // Web interface (set to 0 to disable)
+	HTTPS_PORT  = 443   // TLS web interface (set to 0 to disable)
+	SSH_PORT    = 22    // Anonymous SSH chat (set to 0 to disable)
+	DNS_PORT    = 53    // DNS TXT chat (set to 0 to disable)
+	OPENAI_PORT = 8080  // OpenAI-compatible API (set to 0 to disable)
+	CERT_FILE   = "cert.pem" // TLS certificate for HTTPS
+	KEY_FILE    = "key.pem"  // TLS key for HTTPS
+)
+
+func main() {
+	// SSH Server
+	if SSH_PORT > 0 {
+		go func() {
+			defer func() {
+				if r := recover(); r != nil {
+					fmt.Fprintf(os.Stderr, "SSH server panic: %v\n", r)
+				}
+			}()
+			sshServer := NewSSHServer(SSH_PORT)
+			if err := sshServer.Start(); err != nil {
+				fmt.Fprintf(os.Stderr, "SSH server error: %v\n", err)
+			}
+		}()
+	}
+	
+	// DNS Server
+	if DNS_PORT > 0 {
+		go func() {
+			defer func() {
+				if r := recover(); r != nil {
+					fmt.Fprintf(os.Stderr, "DNS server panic: %v\n", r)
+				}
+			}()
+			dnsServer := NewDNSServer(DNS_PORT)
+			if err := dnsServer.Start(); err != nil {
+				fmt.Fprintf(os.Stderr, "DNS server error: %v\n", err)
+			}
+		}()
+	}
+	
+	// OpenAI API Server
+	if OPENAI_PORT > 0 {
+		go func() {
+			defer func() {
+				if r := recover(); r != nil {
+					fmt.Fprintf(os.Stderr, "OpenAI API server panic: %v\n", r)
+				}
+			}()
+			openaiServer := NewOpenAIServer(OPENAI_PORT)
+			if err := openaiServer.Start(); err != nil {
+				fmt.Fprintf(os.Stderr, "OpenAI API server error: %v\n", err)
+			}
+		}()
+	}
+	
+	// HTTP/HTTPS Server
+	// TODO: Implement graceful shutdown with signal handling
+	if HTTP_PORT > 0 || HTTPS_PORT > 0 {
+		httpServer := NewHTTPServer(HTTP_PORT)
+		
+		if HTTPS_PORT > 0 {
+			go func() {
+				defer func() {
+					if r := recover(); r != nil {
+						fmt.Fprintf(os.Stderr, "HTTPS server panic: %v\n", r)
+					}
+				}()
+				if err := httpServer.StartTLS(HTTPS_PORT, CERT_FILE, KEY_FILE); err != nil {
+					fmt.Fprintf(os.Stderr, "HTTPS server error: %v\n", err)
+				}
+			}()
+		}
+		
+		if HTTP_PORT > 0 {
+			if err := httpServer.Start(); err != nil {
+				fmt.Fprintf(os.Stderr, "HTTP server error: %v\n", err)
+				os.Exit(1)
+			}
+		} else {
+			// If only HTTPS is enabled, block forever
+			select {}
+		}
+	} else {
+		// If no servers enabled, block forever
+		select {}
+	}
+}

+ 209 - 0
dns.go

@@ -0,0 +1,209 @@
+package main
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strings"
+)
+
+type DNSServer struct {
+	port int
+}
+
+func NewDNSServer(port int) *DNSServer {
+	return &DNSServer{port: port}
+}
+
+func (s *DNSServer) Start() error {
+	addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf(":%d", s.port))
+	if err != nil {
+		return err
+	}
+
+	conn, err := net.ListenUDP("udp", addr)
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	fmt.Printf("DNS server listening on :%d\n", s.port)
+
+	buf := make([]byte, 512) // DNS messages are typically small
+	for {
+		n, clientAddr, err := conn.ReadFromUDP(buf)
+		if err != nil {
+			// Read error - continue
+			continue
+		}
+
+		go s.handleQuery(conn, clientAddr, buf[:n])
+	}
+}
+
+func (s *DNSServer) handleQuery(conn *net.UDPConn, addr *net.UDPAddr, query []byte) {
+	// Validate minimum DNS packet size
+	if len(query) < 12 {
+		return
+	}
+
+	// Rate limiting
+	if !rateLimiter.Allow(addr.String()) {
+		return // Silently drop; answering with REFUSED would only add traffic
+	}
+
+	// Validate DNS header flags (must be a query, not response)
+	if query[2]&0x80 != 0 {
+		return // It's a response, not a query
+	}
+
+	// Extract question from query
+	question := extractQuestion(query)
+	if question == "" {
+		return
+	}
+
+	// Remove .ch.at suffix if present
+	question = strings.TrimSuffix(question, ".ch.at")
+	question = strings.TrimSuffix(question, ".")
+	
+	// Convert DNS format to readable (replace - with space)
+	prompt := strings.ReplaceAll(question, "-", " ")
+
+	// Get LLM response
+	ctx := context.Background()
+	response, err := getLLMResponse(ctx, prompt)
+	if err != nil {
+		response = "Error: " + err.Error()
+	}
+
+	// Build DNS response with chunked TXT records
+	reply := buildDNSResponse(query, response)
+	
+	// Ensure response fits in UDP packet (RFC recommends 512 bytes)
+	if len(reply) > 512 {
+		// Truncate and set TC bit
+		reply = reply[:512]
+		reply[2] |= 0x02 // Set TC (truncation) bit
+	}
+	
+	conn.WriteToUDP(reply, addr)
+}
+
+func extractQuestion(query []byte) string {
+	// Skip header (12 bytes)
+	if len(query) < 12 {
+		return ""
+	}
+	
+	pos := 12
+	var name []string
+	totalLength := 0
+	
+	// Parse domain name labels (max 128 to prevent DoS)
+	for i := 0; i < 128 && pos < len(query); i++ {
+		if pos >= len(query) {
+			return ""
+		}
+		
+		length := int(query[pos])
+		if length == 0 {
+			break
+		}
+		
+		// DNS compression uses first 2 bits = 11 (0xC0)
+		// We reject these for simplicity and security
+		if length&0xC0 == 0xC0 {
+			return ""
+		}
+		
+		// DNS label length must be <= 63
+		if length > 63 {
+			return ""
+		}
+		
+		pos++
+		if pos+length > len(query) {
+			return ""
+		}
+		
+		// Track total domain name length (max 255)
+		totalLength += length + 1
+		if totalLength > 255 {
+			return ""
+		}
+		
+		// Take the raw label bytes as-is (no character validation is performed)
+		label := query[pos : pos+length]
+		name = append(name, string(label))
+		pos += length
+	}
+	
+	// Ensure we read a complete question (should have type and class after)
+	if pos+4 > len(query) {
+		return ""
+	}
+	
+	return strings.Join(name, ".")
+}
+
+func buildDNSResponse(query []byte, answer string) []byte {
+	resp := make([]byte, len(query))
+	copy(resp, query)
+	
+	// Set response flags (QR=1, RD=1, RA=1)
+	resp[2] = 0x81
+	resp[3] = 0x80
+	
+	// Set answer count to 1
+	resp[7] = 1
+	
+	// Skip to end of question section
+	pos := 12
+	for pos < len(resp) {
+		if resp[pos] == 0 {
+			pos += 5 // Skip null terminator + type + class
+			break
+		}
+		pos++
+	}
+	
+	// Add answer section
+	// Pointer to question name
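+	// (0xC0 0x0C is a DNS compression pointer to offset 12, where the question name starts)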
+	resp = append(resp, 0xc0, 0x0c)
+	
+	// Type TXT (16), Class IN (1)
+	resp = append(resp, 0x00, 0x10, 0x00, 0x01)
+	
+	// TTL (0)
+	resp = append(resp, 0x00, 0x00, 0x00, 0x00)
+	
+	// Build TXT record data with chunking
+	txtData := buildTXTData(answer)
+	
+	// Data length
+	resp = append(resp, byte(len(txtData)>>8), byte(len(txtData)))
+	
+	// TXT data
+	resp = append(resp, txtData...)
+	
+	return resp
+}
+
+func buildTXTData(text string) []byte {
+	var data []byte
+	
+	// Split into 255-byte chunks
+	for len(text) > 0 {
+		chunkLen := len(text)
+		if chunkLen > 255 {
+			chunkLen = 255
+		}
+		
+		data = append(data, byte(chunkLen))
+		data = append(data, text[:chunkLen]...)
+		text = text[chunkLen:]
+	}
+	
+	return data
+}

+ 7 - 0
go.mod

@@ -0,0 +1,7 @@
+module ch.at
+
+go 1.21
+
+require golang.org/x/crypto v0.28.0
+
+require golang.org/x/sys v0.26.0 // indirect

+ 6 - 0
go.sum

@@ -0,0 +1,6 @@
+golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
+golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
+golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=

+ 183 - 0
http.go

@@ -0,0 +1,183 @@
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"html"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+)
+
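+// minimalHTML takes two %s values: the visible conversation transcript and the
+// same text again for the hidden history field; both are HTML-escaped by the handler.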
+const minimalHTML = `<!DOCTYPE html>
+<html>
+<head>
+    <title>ch.at</title>
+    <style>
+        body { text-align: center; margin: 40px; }
+        pre { text-align: left; max-width: 600px; margin: 20px auto; padding: 20px; 
+              white-space: pre-wrap; word-wrap: break-word; }
+        input[type="text"] { width: 300px; }
+    </style>
+</head>
+<body>
+    <h1>ch.at</h1>
+    <p><i>pronounced "ch-dot-at"</i></p>
+    <pre>%s</pre>
+    <form method="POST" action="/">
+        <input type="text" name="q" placeholder="Type your message..." autofocus>
+        <textarea name="h" style="display:none">%s</textarea>
+        <input type="submit" value="Send">
+    </form>
+    <p><a href="/">Clear History</a> • <a href="https://github.com/ch-at/ch.at#readme">About</a></p>
+</body>
+</html>`
+
+type HTTPServer struct {
+	port int
+}
+
+func NewHTTPServer(port int) *HTTPServer {
+	s := &HTTPServer{port: port}
+	// Register on the default mux here so the handler is in place for both Start
+	// and StartTLS; StartTLS serves the default mux, which would otherwise be
+	// empty when only HTTPS is enabled.
+	http.HandleFunc("/", s.handleRoot)
+	return s
+}
+
+func (s *HTTPServer) Start() error {
+	addr := fmt.Sprintf(":%d", s.port)
+	fmt.Printf("HTTP server listening on %s\n", addr)
+	return http.ListenAndServe(addr, nil)
+}
+
+func (s *HTTPServer) StartTLS(port int, certFile, keyFile string) error {
+	addr := fmt.Sprintf(":%d", port)
+	fmt.Printf("HTTPS server listening on %s\n", addr)
+	return http.ListenAndServeTLS(addr, certFile, keyFile, nil)
+}
+
+func (s *HTTPServer) handleRoot(w http.ResponseWriter, r *http.Request) {
+	if !rateLimiter.Allow(r.RemoteAddr) {
+		http.Error(w, "Rate limit exceeded. Please try again later.", http.StatusTooManyRequests)
+		return
+	}
+
+	var query, history, prompt string
+	content := ""
+	jsonResponse := ""
+
+	if r.Method == "POST" {
+		if err := r.ParseForm(); err != nil {
+			http.Error(w, "Failed to parse form", http.StatusBadRequest)
+			return
+		}
+		query = r.FormValue("q")
+		history = r.FormValue("h")
+
+		// Limit history size to prevent abuse
+		if len(history) > 2048 {
+			history = history[len(history)-2048:]
+		}
+
+		// If no form fields, treat body as raw query (for curl)
+		if query == "" {
+			body, err := io.ReadAll(io.LimitReader(r.Body, 4096)) // Limit body size
+			if err != nil {
+				http.Error(w, "Failed to read request body", http.StatusBadRequest)
+				return
+			}
+			query = string(body)
+		}
+	} else {
+		// GET request - no history
+		query = r.URL.Query().Get("q")
+	}
+
+	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
+	defer cancel()
+
+	if query != "" {
+		// Build prompt with history
+		prompt = query
+		if history != "" {
+			prompt = history + "Q: " + query
+		}
+
+		response, err := getLLMResponse(ctx, prompt)
+		if err != nil {
+			content = fmt.Sprintf("Error: %s", err.Error())
+			errJSON, _ := json.Marshal(map[string]string{"error": err.Error()})
+			jsonResponse = string(errJSON)
+		} else {
+			// Store JSON response
+			respJSON, _ := json.Marshal(map[string]string{
+				"question": query,
+				"answer":   response,
+			})
+			jsonResponse = string(respJSON)
+
+			// Append to history
+			newExchange := fmt.Sprintf("Q: %s\nA: %s\n\n", query, response)
+			if history != "" {
+				content = history + newExchange
+			} else {
+				content = newExchange
+			}
+			// Trim history if too long (UTF-8 safe)
+			if len(content) > 2048 {
+				// UTF-8 continuation bytes start with 10xxxxxx (0x80-0xBF)
+				// Find a character boundary to avoid splitting multi-byte chars
+				for i := len(content) - 2048; i < len(content)-2040; i++ {
+					if content[i]&0xC0 != 0x80 { // Not a continuation byte
+						content = content[i:]
+						break
+					}
+				}
+			}
+		}
+	} else if history != "" {
+		content = history
+	}
+
+	accept := r.Header.Get("Accept")
+
+	// Stream for curl when requested
+	if strings.Contains(accept, "text/event-stream") && query != "" {
+		w.Header().Set("Content-Type", "text/event-stream")
+		w.Header().Set("Cache-Control", "no-cache")
+		w.Header().Set("Connection", "keep-alive")
+
+		flusher, ok := w.(http.Flusher)
+		if !ok {
+			http.Error(w, "Streaming not supported", http.StatusInternalServerError)
+			return
+		}
+
+		stream, err := getLLMResponseStream(ctx, prompt)
+		if err != nil {
+			fmt.Fprintf(w, "data: Error: %s\n\n", err.Error())
+			return
+		}
+
+		for chunk := range stream {
+			fmt.Fprintf(w, "data: %s\n\n", chunk)
+			flusher.Flush()
+		}
+		fmt.Fprintf(w, "data: [DONE]\n\n")
+		return
+	}
+
+	// Return JSON for API requests, HTML for browsers, plain text for curl
+	if strings.Contains(accept, "application/json") && jsonResponse != "" {
+		w.Header().Set("Content-Type", "application/json; charset=utf-8")
+		fmt.Fprint(w, jsonResponse)
+	} else if strings.Contains(accept, "text/html") {
+		w.Header().Set("Content-Type", "text/html; charset=utf-8")
+		fmt.Fprintf(w, minimalHTML, html.EscapeString(content), html.EscapeString(content))
+	} else {
+		// Default to plain text for curl and other tools
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		fmt.Fprint(w, content)
+	}
+}

+ 147 - 0
openai.go

@@ -0,0 +1,147 @@
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+	"time"
+)
+
+type OpenAIServer struct {
+	port int
+}
+
+func NewOpenAIServer(port int) *OpenAIServer {
+	return &OpenAIServer{port: port}
+}
+
+func (s *OpenAIServer) Start() error {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/v1/chat/completions", s.handleChatCompletions)
+	
+	addr := fmt.Sprintf(":%d", s.port)
+	fmt.Printf("OpenAI API server listening on %s\n", addr)
+	return http.ListenAndServe(addr, mux)
+}
+
+type ChatRequest struct {
+	Model    string    `json:"model"`
+	Messages []Message `json:"messages"`
+	Stream   bool      `json:"stream,omitempty"`
+}
+
+type Message struct {
+	Role    string `json:"role"`
+	Content string `json:"content"`
+}
+
+type ChatResponse struct {
+	ID      string   `json:"id"`
+	Object  string   `json:"object"`
+	Created int64    `json:"created"`
+	Model   string   `json:"model"`
+	Choices []Choice `json:"choices"`
+}
+
+type Choice struct {
+	Index   int     `json:"index"`
+	Message Message `json:"message"`
+}
+
+
+func (s *OpenAIServer) handleChatCompletions(w http.ResponseWriter, r *http.Request) {
+	if r.Method != "POST" {
+		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
+		return
+	}
+
+	var req ChatRequest
+	if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+		http.Error(w, "Invalid JSON", http.StatusBadRequest)
+		return
+	}
+
+	// Convert messages to single prompt
+	prompt := buildPrompt(req.Messages)
+	
+	// Call our chat function
+	ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
+	defer cancel()
+	
+	if req.Stream {
+		// Streaming response
+		w.Header().Set("Content-Type", "text/event-stream")
+		w.Header().Set("Cache-Control", "no-cache")
+		w.Header().Set("Connection", "keep-alive")
+		
+		flusher, ok := w.(http.Flusher)
+		if !ok {
+			http.Error(w, "Streaming not supported", http.StatusInternalServerError)
+			return
+		}
+		
+		stream, err := getLLMResponseStream(ctx, prompt)
+		if err != nil {
+			errJSON, _ := json.Marshal(map[string]string{"error": err.Error()})
+			fmt.Fprintf(w, "data: %s\n\n", errJSON)
+			return
+		}
+		
+		for chunk := range stream {
+			resp := map[string]interface{}{
+				"id": fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
+				"object": "chat.completion.chunk",
+				"created": time.Now().Unix(),
+				"model": req.Model,
+				"choices": []map[string]interface{}{{
+					"index": 0,
+					"delta": map[string]string{"content": chunk},
+				}},
+			}
+			data, err := json.Marshal(resp)
+			if err != nil {
+				fmt.Fprintf(w, "data: error marshaling response\n\n")
+				return
+			}
+			fmt.Fprintf(w, "data: %s\n\n", data)
+			flusher.Flush()
+		}
+		fmt.Fprintf(w, "data: [DONE]\n\n")
+		
+	} else {
+		// Non-streaming response
+		response, err := getLLMResponse(ctx, prompt)
+		if err != nil {
+			http.Error(w, fmt.Sprintf("Chat error: %v", err), http.StatusInternalServerError)
+			return
+		}
+
+		// Return OpenAI-compatible response
+		chatResp := ChatResponse{
+			ID:      fmt.Sprintf("chatcmpl-%d", time.Now().Unix()),
+			Object:  "chat.completion",
+			Created: time.Now().Unix(),
+			Model:   req.Model,
+			Choices: []Choice{{
+				Index: 0,
+				Message: Message{
+					Role:    "assistant",
+					Content: response,
+				},
+			}},
+		}
+
+		w.Header().Set("Content-Type", "application/json")
+		json.NewEncoder(w).Encode(chatResp)
+	}
+}
+
+func buildPrompt(messages []Message) string {
+	// Simple: just concatenate messages
+	var parts []string
+	for _, msg := range messages {
+		parts = append(parts, msg.Content)
+	}
+	return strings.Join(parts, "\n")
+}

+ 203 - 0
ssh.go

@@ -0,0 +1,203 @@
+package main
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"strings"
+
+	"golang.org/x/crypto/ssh"
+)
+
+type SSHServer struct {
+	port int
+}
+
+func NewSSHServer(port int) *SSHServer {
+	return &SSHServer{port: port}
+}
+
+func (s *SSHServer) Start() error {
+	// SSH server configuration
+	config := &ssh.ServerConfig{
+		NoClientAuth: true, // Anonymous access
+	}
+
+	// Get or create persistent host key
+	privateKey, err := getOrCreateHostKey()
+	if err != nil {
+		return fmt.Errorf("failed to get host key: %v", err)
+	}
+	config.AddHostKey(privateKey)
+
+	// Listen for connections
+	listener, err := net.Listen("tcp", fmt.Sprintf(":%d", s.port))
+	if err != nil {
+		return err
+	}
+	defer listener.Close()
+
+	fmt.Printf("SSH server listening on :%d\n", s.port)
+
+	// Simple connection limiting
+	sem := make(chan struct{}, 100) // Max 100 concurrent SSH connections
+	
+	for {
+		conn, err := listener.Accept()
+		if err != nil {
+			// Connection error - continue accepting others
+			continue
+		}
+		
+		select {
+		case sem <- struct{}{}:
+			go func() {
+				defer func() { <-sem }()
+				s.handleConnection(conn, config)
+			}()
+		default:
+			// Too many connections
+			conn.Close()
+		}
+	}
+}
+
+func (s *SSHServer) handleConnection(netConn net.Conn, config *ssh.ServerConfig) {
+	defer netConn.Close()
+
+	// Rate limiting
+	if !rateLimiter.Allow(netConn.RemoteAddr().String()) {
+		netConn.Write([]byte("Rate limit exceeded. Please try again later.\r\n"))
+		return
+	}
+
+	// Perform SSH handshake
+	sshConn, chans, reqs, err := ssh.NewServerConn(netConn, config)
+	if err != nil {
+		// Handshake failed - continue accepting others
+		return
+	}
+	defer sshConn.Close()
+
+	go ssh.DiscardRequests(reqs)
+
+	// Handle channels (sessions)
+	for newChannel := range chans {
+		if newChannel.ChannelType() != "session" {
+			newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
+			continue
+		}
+
+		channel, requests, err := newChannel.Accept()
+		if err != nil {
+			// Channel error - continue
+			continue
+		}
+
+		go s.handleSession(channel, requests)
+	}
+}
+
+func (s *SSHServer) handleSession(channel ssh.Channel, requests <-chan *ssh.Request) {
+	defer channel.Close()
+
+	// Handle session requests
+	go func() {
+		for req := range requests {
+			switch req.Type {
+			case "shell", "pty-req":
+				req.Reply(true, nil)
+			default:
+				req.Reply(false, nil)
+			}
+		}
+	}()
+
+	fmt.Fprintf(channel, "Welcome to ch.at\r\n")
+	fmt.Fprintf(channel, "Type your message and press Enter. Type 'exit' to quit.\r\n")
+	fmt.Fprintf(channel, "> ")
+
+	// Read line by line
+	var input strings.Builder
+	buf := make([]byte, 1024)
+
+	for {
+		n, err := channel.Read(buf)
+		if err != nil {
+			if err != io.EOF {
+				// Read error - exit session
+			}
+			return
+		}
+
+		data := string(buf[:n])
+		for _, ch := range data {
+			if ch == '\n' || ch == '\r' {
+				if input.Len() > 0 {
+					query := strings.TrimSpace(input.String())
+					input.Reset()
+
+					if query == "exit" {
+						fmt.Fprintf(channel, "Goodbye!\r\n")
+						return
+					}
+
+					// Get LLM response with streaming
+					ctx := context.Background()
+					stream, err := getLLMResponseStream(ctx, query)
+					if err != nil {
+						fmt.Fprintf(channel, "Error: %v\r\n", err)
+						fmt.Fprintf(channel, "> ")
+						continue
+					}
+					
+					// Stream response as it arrives
+					for chunk := range stream {
+						fmt.Fprint(channel, chunk)
+						if f, ok := channel.(interface{ Flush() }); ok {
+							f.Flush()
+						}
+					}
+					fmt.Fprintf(channel, "\r\n> ")
+				}
+			} else {
+				input.WriteRune(ch)
+			}
+		}
+	}
+}
+
+// getOrCreateHostKey loads existing key or generates new one
+func getOrCreateHostKey() (ssh.Signer, error) {
+	keyPath := "ssh_host_key"
+	
+	// Try to load existing key
+	if keyData, err := os.ReadFile(keyPath); err == nil {
+		return ssh.ParsePrivateKey(keyData)
+	}
+
+	// No key on disk: generate a new RSA host key
+	key, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return nil, err
+	}
+
+	// Persist the key so clients don't see "host key changed" warnings on restart
+	// (remove the save below for an ephemeral key and maximum privacy)
+	keyData := pem.EncodeToMemory(&pem.Block{
+		Type:  "RSA PRIVATE KEY",
+		Bytes: x509.MarshalPKCS1PrivateKey(key),
+	})
+	
+	if err := os.WriteFile(keyPath, keyData, 0600); err != nil {
+		// Couldn't save host key - continue anyway
+	}
+
+	return ssh.NewSignerFromKey(key)
+}

+ 118 - 0
util.go

@@ -0,0 +1,118 @@
+package main
+
+import (
+	"net"
+	"sync"
+	"time"
+)
+
+// Simple in-memory rate limiter
+// To disable: remove the rateLimiter.Allow checks from the protocol files
+type RateLimiter struct {
+	requests map[string][]time.Time
+	mu       sync.Mutex
+	limit    int           // requests per window
+	window   time.Duration // time window
+	stopCh   chan struct{} // for cleanup goroutine
+}
+
+func NewRateLimiter(limit int, window time.Duration) *RateLimiter {
+	r := &RateLimiter{
+		requests: make(map[string][]time.Time),
+		limit:    limit,
+		window:   window,
+		stopCh:   make(chan struct{}),
+	}
+	
+	// Start cleanup goroutine
+	go r.cleanup()
+	
+	return r
+}
+
+func (r *RateLimiter) Allow(addr string) bool {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	// Extract IP without port (addr might be "1.2.3.4:5678" or just "1.2.3.4")
+	ip, _, _ := net.SplitHostPort(addr)
+	if ip == "" {
+		ip = addr // addr was already just an IP
+	}
+
+	now := time.Now()
+	cutoff := now.Add(-r.window)
+
+	// Get or create request list
+	requests := r.requests[ip]
+	
+	// Remove old requests
+	valid := []time.Time{}
+	for _, t := range requests {
+		if t.After(cutoff) {
+			valid = append(valid, t)
+		}
+	}
+
+	// Check limit
+	if len(valid) >= r.limit {
+		return false
+	}
+
+	// Add new request
+	valid = append(valid, now)
+	r.requests[ip] = valid
+	
+	return true
+}
+
+// Periodic cleanup to prevent unbounded memory growth
+func (r *RateLimiter) cleanup() {
+	ticker := time.NewTicker(r.window)
+	defer ticker.Stop()
+	
+	for {
+		select {
+		case <-ticker.C:
+			r.mu.Lock()
+			now := time.Now()
+			cutoff := now.Add(-r.window)
+			
+			// Remove IPs with no recent requests
+			for ip, requests := range r.requests {
+				valid := []time.Time{}
+				for _, t := range requests {
+					if t.After(cutoff) {
+						valid = append(valid, t)
+					}
+				}
+				
+				if len(valid) == 0 {
+					delete(r.requests, ip)
+				} else {
+					r.requests[ip] = valid
+				}
+			}
+			r.mu.Unlock()
+			
+		case <-r.stopCh:
+			return
+		}
+	}
+}
+
+// Stop the rate limiter cleanup
+func (r *RateLimiter) Stop() {
+	select {
+	case <-r.stopCh:
+		// Already closed
+	default:
+		close(r.stopCh)
+	}
+}
+
+// Add other shared utilities here as needed
+// Each should be self-contained and optional
+
+// Global rate limiter instance
+var rateLimiter = NewRateLimiter(100, time.Minute) // 100 requests per minute per IP