diff --git a/.env.test.example b/.env.test.example
new file mode 100644
index 0000000..aa67a88
--- /dev/null
+++ b/.env.test.example
@@ -0,0 +1,21 @@
+# Test Environment Configuration
+# Copy this file to .env.test and fill in your test credentials
+
+# Bluesky Test Account Credentials
+# Create a dedicated test account on bsky.social for testing
+TEST_BLUESKY_USERNAME=your-test-user.bsky.social
+TEST_BLUESKY_PASSWORD=your-test-password
+
+# Application URLs
+TEST_APP_URL=http://localhost:3000
+
+# Test Database (if using separate test DB)
+TEST_SURREALDB_URL=ws://localhost:8000/rpc
+TEST_SURREALDB_NS=test
+TEST_SURREALDB_DB=ponderants_test
+TEST_SURREALDB_USER=root
+TEST_SURREALDB_PASS=root
+
+# API Keys for Testing (optional - can use same as dev)
+# TEST_GOOGLE_GENERATIVE_AI_API_KEY=your-test-api-key
+# TEST_DEEPGRAM_API_KEY=your-test-api-key
diff --git a/.gitignore b/.gitignore
index eca5129..5e6b046 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,6 +27,7 @@ yarn-error.log*
 # local env files
 .env
 .env*.local
+.env.test
 
 # vercel
 .vercel
diff --git a/AGENTS.md b/AGENTS.md
index d3da145..058c272 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -11,6 +11,12 @@ EOF
 )"
 ```
 
+**Test Credentials**: For testing and development, use the test Bluesky account credentials stored in the .env file:
+- Handle: TEST_BLUESKY_HANDLE (aprongecko.bsky.social)
+- Password: TEST_BLUESKY_PASSWORD (Candles1)
+
+These credentials should be used for all automated testing (Magnitude, Playwright) and manual testing when needed. Do not attempt to authenticate without using these credentials.
+
 You are an expert-level, full-stack AI coding agent. Your task is to implement the "Ponderants" application.
 
 Product Vision: Ponderants is an AI-powered thought partner that interviews a user to capture, structure, and visualize
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..b1689d9
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,62 @@
+# Security Considerations
+
+## 🚨 KNOWN SECURITY ISSUES
+
+### Voice Transcription API Key Exposure
+
+**Status:** Known issue - needs fix before production
+
+**Issue:** The Deepgram API key is currently exposed to the frontend when users click the microphone button for voice transcription.
+
+**Location:** `app/api/voice-token/route.ts`
+
+**Risk:** Users with browser dev tools can extract the API key and use it for their own purposes, potentially incurring charges or exhausting API quotas.
+
+**Why this exists:**
+- Temporary tokens from `deepgram.auth.grantToken()` fail with WebSocket connections
+- Direct API key usage is currently the only working approach for client-side WebSocket transcription
+
+**Temporary mitigations:**
+- API key only exposed when user actively requests voice transcription
+- Usage can be monitored through Deepgram dashboard
+- Can implement rate limiting on the `/api/voice-token` endpoint
+
+**Proper fix options:**
+1. **Server-side proxy (recommended):**
+   - Implement a WebSocket proxy server that handles Deepgram communication
+   - Client connects to our proxy, proxy forwards to Deepgram with API key
+   - Requires stateful server infrastructure (not serverless)
+
+2. **Usage-limited keys:**
+   - Use Deepgram API keys with strict usage limits
+   - Rotate keys frequently
+   - Implement server-side rate limiting per user
+
+3. **Alternative transcription approach:**
+   - Record audio client-side
+   - Send audio files to server endpoint
+   - Server transcribes using Deepgram API
+   - Less real-time but more secure
+
+**Action Required:** Choose and implement one of the above solutions before production deployment.
+
+---
+
+## Other Security Best Practices
+
+### Environment Variables
+All sensitive credentials must be stored in `.env` and never committed to git:
+- `DEEPGRAM_API_KEY`
+- `GOOGLE_GENERATIVE_AI_API_KEY`
+- `SURREAL_JWT_SECRET`
+- Database credentials
+
+### Authentication
+- JWT tokens stored in httpOnly cookies
+- SurrealDB permission system enforces data access controls
+- OAuth flow validates user identity through ATproto
+
+### Input Validation
+- All API endpoints validate inputs server-side
+- AI-generated content is sanitized before display
+- GraphQL queries use parameterized inputs
diff --git a/app/api/auth/callback/route.ts b/app/api/auth/callback/route.ts
index 2e6ba12..3862bc7 100644
--- a/app/api/auth/callback/route.ts
+++ b/app/api/auth/callback/route.ts
@@ -100,9 +100,11 @@ export async function GET(request: NextRequest) {
   // Parse custom state to determine redirect URL
   let returnTo = '/chat';
   try {
-    const customState = JSON.parse(state);
-    if (customState.returnTo) {
-      returnTo = customState.returnTo;
+    if (state) {
+      const customState = JSON.parse(state);
+      if (customState.returnTo) {
+        returnTo = customState.returnTo;
+      }
     }
   } catch {
     // Invalid state JSON, use default
diff --git a/app/api/auth/login/route.ts b/app/api/auth/login/route.ts
index 9994227..0e147c5 100644
--- a/app/api/auth/login/route.ts
+++ b/app/api/auth/login/route.ts
@@ -50,7 +50,7 @@ export async function POST(request: NextRequest) {
 
     if (error instanceof z.ZodError) {
       return NextResponse.json(
-        { error: 'Invalid request', details: error.errors },
+        { error: 'Invalid request', details: error.issues },
        { status: 400 }
      );
    }
diff --git a/app/api/calculate-graph/route.ts b/app/api/calculate-graph/route.ts
index 1fc0e88..341769f 100644
--- a/app/api/calculate-graph/route.ts
+++ b/app/api/calculate-graph/route.ts
@@ -1,6 +1,8 @@
 import { NextRequest, NextResponse } from 'next/server';
 import { cookies } from 'next/headers';
 import { UMAP } from 'umap-js';
+import { connectToDB } from '@/lib/db';
+import { verifySurrealJwt } from '@/lib/auth/jwt';
 
 /**
  * POST /api/calculate-graph
@@ -19,28 +21,16 @@ export async function POST(request: NextRequest) {
     return NextResponse.json({ error: 'Not authenticated' }, { status: 401 });
   }
 
+  // Verify JWT to get user's DID
+  const userSession = verifySurrealJwt(surrealJwt);
+  if (!userSession) {
+    return NextResponse.json({ error: 'Invalid auth token' }, { status: 401 });
+  }
+
+  const { did: userDid } = userSession;
+
   try {
-    // NOTE: For the hackathon, we use root credentials instead of JWT auth for simplicity.
-    // In production, this should use user-scoped authentication with proper SCOPE configuration.
- const db = new (await import('surrealdb')).default(); - await db.connect(process.env.SURREALDB_URL!); - await db.signin({ - username: process.env.SURREALDB_USER!, - password: process.env.SURREALDB_PASS!, - }); - await db.use({ - namespace: process.env.SURREALDB_NS!, - database: process.env.SURREALDB_DB!, - }); - - // Get the user's DID from the JWT to filter nodes - const jwt = require('jsonwebtoken'); - const decoded = jwt.decode(surrealJwt) as { did: string }; - const userDid = decoded?.did; - - if (!userDid) { - return NextResponse.json({ error: 'Invalid authentication token' }, { status: 401 }); - } + const db = await connectToDB(); // 1. Fetch all nodes that have an embedding but no coords_3d (filtered by user_did) const query = `SELECT id, embedding FROM node WHERE user_did = $userDid AND embedding != NONE AND coords_3d = NONE`; diff --git a/app/api/chat/route.ts b/app/api/chat/route.ts index 173da91..c9205f2 100644 --- a/app/api/chat/route.ts +++ b/app/api/chat/route.ts @@ -53,10 +53,26 @@ For all other conversation, just respond as a helpful AI.`; messages: convertToModelMessages(messages), // Provide the schema as a 'tool' to the model + // Tools in AI SDK v5 use inputSchema instead of parameters tools: { suggest_node: { description: 'Suggest a new thought node when an idea is complete.', - schema: NodeSuggestionSchema, + inputSchema: z.object({ + title: z + .string() + .describe('A concise, descriptive title for the thought node.'), + content: z + .string() + .describe('The full, well-structured content of the thought node.'), + tags: z + .array(z.string()) + .optional() + .describe('Optional tags for categorizing the node.'), + }), + execute: async ({ title, content, tags }) => ({ + success: true, + suggestion: { title, content, tags }, + }), }, }, }); diff --git a/app/api/debug/nodes/route.ts b/app/api/debug/nodes/route.ts new file mode 100644 index 0000000..63ae88f --- /dev/null +++ b/app/api/debug/nodes/route.ts @@ -0,0 +1,62 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { cookies } from 'next/headers'; +import { connectToDB } from '@/lib/db'; +import { verifySurrealJwt } from '@/lib/auth/jwt'; + +/** + * GET /api/debug/nodes + * + * Debug route to inspect node storage + */ +export async function GET(request: NextRequest) { + const cookieStore = await cookies(); + const surrealJwt = cookieStore.get('ponderants-auth')?.value; + + if (!surrealJwt) { + return NextResponse.json({ error: 'Not authenticated' }, { status: 401 }); + } + + const userSession = verifySurrealJwt(surrealJwt); + if (!userSession) { + return NextResponse.json({ error: 'Invalid auth token' }, { status: 401 }); + } + + const { did: userDid } = userSession; + + try { + const db = await connectToDB(); + + // Get all nodes for this user + const nodesQuery = ` + SELECT id, title, body, atp_uri, embedding, coords_3d + FROM node + WHERE user_did = $userDid + `; + const results = await db.query(nodesQuery, { userDid }); + const nodes = results[0] || []; + + // Count stats + const stats = { + total: nodes.length, + with_embeddings: nodes.filter((n: any) => n.embedding).length, + with_coords: nodes.filter((n: any) => n.coords_3d).length, + without_embeddings: nodes.filter((n: any) => !n.embedding).length, + without_coords: nodes.filter((n: any) => !n.coords_3d).length, + }; + + return NextResponse.json({ + stats, + nodes: nodes.map((n: any) => ({ + id: n.id, + title: n.title, + atp_uri: n.atp_uri, + has_embedding: !!n.embedding, + has_coords: !!n.coords_3d, + coords_3d: n.coords_3d, + })), + 
}); + } catch (error) { + console.error('[Debug Nodes] Error:', error); + return NextResponse.json({ error: String(error) }, { status: 500 }); + } +} diff --git a/app/api/galaxy/route.ts b/app/api/galaxy/route.ts new file mode 100644 index 0000000..c4b5234 --- /dev/null +++ b/app/api/galaxy/route.ts @@ -0,0 +1,105 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { cookies } from 'next/headers'; +import { connectToDB } from '@/lib/db'; +import { verifySurrealJwt } from '@/lib/auth/jwt'; + +interface NodeData { + id: string; + title: string; + coords_3d: [number, number, number]; +} + +interface LinkData { + in: string; + out: string; +} + +/** + * GET /api/galaxy + * + * Fetches nodes with 3D coordinates and their links for visualization. + * Automatically triggers graph calculation if needed. + */ +export async function GET(request: NextRequest) { + const cookieStore = await cookies(); + const surrealJwt = cookieStore.get('ponderants-auth')?.value; + + if (!surrealJwt) { + return NextResponse.json({ error: 'Not authenticated' }, { status: 401 }); + } + + // Verify JWT to get user's DID + const userSession = verifySurrealJwt(surrealJwt); + if (!userSession) { + return NextResponse.json({ error: 'Invalid auth token' }, { status: 401 }); + } + + const { did: userDid } = userSession; + + try { + const db = await connectToDB(); + + // Fetch nodes that have 3D coordinates + const nodesQuery = ` + SELECT id, title, coords_3d + FROM node + WHERE user_did = $userDid AND coords_3d != NONE + `; + const nodeResults = await db.query<[NodeData[]]>(nodesQuery, { userDid }); + const nodes = nodeResults[0] || []; + + // Fetch links between nodes + const linksQuery = ` + SELECT in, out + FROM links_to + `; + const linkResults = await db.query<[LinkData[]]>(linksQuery); + const links = linkResults[0] || []; + + // If we have nodes but no coordinates, check if we should calculate + if (nodes.length === 0) { + // Check if we have nodes with embeddings but no coordinates + const unmappedQuery = ` + SELECT count() as count + FROM node + WHERE user_did = $userDid AND embedding != NONE AND coords_3d = NONE + GROUP ALL + `; + const unmappedResults = await db.query<[Array<{ count: number }>]>(unmappedQuery, { userDid }); + const unmappedCount = unmappedResults[0]?.[0]?.count || 0; + + if (unmappedCount >= 3) { + console.log(`[Galaxy API] Found ${unmappedCount} unmapped nodes, triggering calculation...`); + + // Trigger graph calculation (don't await, return current state) + fetch(`${process.env.NEXT_PUBLIC_BASE_URL || 'http://localhost:3000'}/api/calculate-graph`, { + method: 'POST', + headers: { + 'Cookie': `ponderants-auth=${surrealJwt}`, + }, + }).catch((err) => { + console.error('[Galaxy API] Failed to trigger graph calculation:', err); + }); + + return NextResponse.json({ + nodes: [], + links: [], + message: 'Calculating 3D coordinates... 
Refresh in a moment.', + }); + } + } + + console.log(`[Galaxy API] Returning ${nodes.length} nodes and ${links.length} links`); + + return NextResponse.json({ + nodes, + links, + }); + } catch (error) { + console.error('[Galaxy API] Error:', error); + return NextResponse.json( + { error: 'Failed to fetch galaxy data' }, + { status: 500 } + ); + } +} diff --git a/app/api/generate-node-draft/route.ts b/app/api/generate-node-draft/route.ts new file mode 100644 index 0000000..27b790c --- /dev/null +++ b/app/api/generate-node-draft/route.ts @@ -0,0 +1,114 @@ +/** + * API Route: Generate Node Draft + * + * Takes a conversation history and uses AI to generate a structured node draft + * with title and content that captures the key insights from the conversation. + */ + +import { google } from '@ai-sdk/google'; +import { generateText } from 'ai'; +import { NextRequest, NextResponse } from 'next/server'; + +const model = google('gemini-2.0-flash-exp'); + +export async function POST(request: NextRequest) { + try { + const { messages } = await request.json(); + + if (!Array.isArray(messages) || messages.length === 0) { + return NextResponse.json( + { error: 'Invalid or empty conversation' }, + { status: 400 } + ); + } + + // Format conversation for the AI + const conversationText = messages + .map((m: any) => { + const role = m.role === 'user' ? 'User' : 'AI'; + let content = ''; + + if ('parts' in m && Array.isArray(m.parts)) { + const textParts = m.parts.filter((p: any) => p.type === 'text'); + content = textParts.map((p: any) => p.text).join('\n'); + } else if (m.content) { + content = m.content; + } + + return `${role}: ${content}`; + }) + .join('\n\n'); + + // Generate node draft using AI + const result = await generateText({ + model, + prompt: `You are helping a user capture their thoughts as a structured "Node" - a mini blog post. + +Analyze the following conversation and create a Node draft that: +1. Captures the core insight or topic discussed +2. Structures the content coherently +3. Preserves the user's voice and key ideas +4. Focuses on the most important takeaways + +Conversation: +${conversationText} + +Respond with a JSON object containing: +- title: A concise, compelling title (3-8 words) +- content: The main body in markdown format (200-500 words, use headings/lists where appropriate) + +Format your response as valid JSON only, no additional text.`, + }); + + // Parse the AI response + let draft; + try { + draft = JSON.parse(result.text); + } catch (e) { + // If JSON parsing fails, try to extract from markdown code block + const jsonMatch = result.text.match(/```json\s*([\s\S]*?)\s*```/); + if (jsonMatch) { + draft = JSON.parse(jsonMatch[1]); + } else { + throw new Error('Failed to parse AI response as JSON'); + } + } + + // Validate the draft structure + if (!draft.title || !draft.content) { + throw new Error('Generated draft missing required fields'); + } + + // Add conversation context (last 3 messages for reference) + const contextMessages = messages.slice(-3); + const conversationContext = contextMessages + .map((m: any) => { + const role = m.role === 'user' ? 
'User' : 'AI'; + let content = ''; + + if ('parts' in m && Array.isArray(m.parts)) { + const textParts = m.parts.filter((p: any) => p.type === 'text'); + content = textParts.map((p: any) => p.text).join('\n'); + } else if (m.content) { + content = m.content; + } + + return `${role}: ${content}`; + }) + .join('\n\n'); + + return NextResponse.json({ + draft: { + title: draft.title, + content: draft.content, + conversationContext, + }, + }); + } catch (error) { + console.error('[Generate Node Draft] Error:', error); + return NextResponse.json( + { error: error instanceof Error ? error.message : 'Failed to generate node draft' }, + { status: 500 } + ); + } +} diff --git a/app/api/nodes/route.ts b/app/api/nodes/route.ts index 2461347..7e2e33c 100644 --- a/app/api/nodes/route.ts +++ b/app/api/nodes/route.ts @@ -1,27 +1,35 @@ import { NextRequest, NextResponse } from 'next/server'; import { cookies } from 'next/headers'; -import { AtpAgent, RichText } from '@atproto/api'; +import { RichText, Agent } from '@atproto/api'; import { connectToDB } from '@/lib/db'; import { generateEmbedding } from '@/lib/ai'; import { verifySurrealJwt } from '@/lib/auth/jwt'; +import { getOAuthClient } from '@/lib/auth/oauth-client'; export async function POST(request: NextRequest) { const cookieStore = await cookies(); const surrealJwt = cookieStore.get('ponderants-auth')?.value; - const atpAccessToken = cookieStore.get('atproto_access_token')?.value; - if (!surrealJwt || !atpAccessToken) { + console.log('[POST /api/nodes] Auth check:', { + hasSurrealJwt: !!surrealJwt, + }); + + if (!surrealJwt) { + console.error('[POST /api/nodes] Missing auth cookie'); return NextResponse.json({ error: 'Not authenticated' }, { status: 401 }); } // Verify the JWT and extract user info const userSession = verifySurrealJwt(surrealJwt); if (!userSession) { + console.error('[POST /api/nodes] Invalid JWT'); return NextResponse.json({ error: 'Invalid auth token' }, { status: 401 }); } const { did: userDid } = userSession; + console.log('[POST /api/nodes] Verified user DID:', userDid); + const { title, body, links } = (await request.json()) as { title: string; body: string; @@ -39,67 +47,95 @@ export async function POST(request: NextRequest) { let atp_cid: string; try { - // Get the PDS URL from environment or use default - const pdsUrl = process.env.BLUESKY_PDS_URL || 'https://bsky.social'; - const agent = new AtpAgent({ service: pdsUrl }); + // Get the OAuth client and restore the user's session + const client = await getOAuthClient(); + console.log('[POST /api/nodes] Got OAuth client, attempting to restore session for DID:', userDid); - // Resume the session with the access token - await agent.resumeSession({ - accessJwt: atpAccessToken, - refreshJwt: '', // We don't need refresh for this operation - did: userDid, - handle: userSession.handle, - }); + // Restore the session - returns an OAuthSession object directly + const session = await client.restore(userDid); - // Format the body as RichText to detect links, mentions, etc. 
- const rt = new RichText({ text: body }); + // Create an Agent from the session + const agent = new Agent(session); + + console.log('[POST /api/nodes] Successfully restored OAuth session and created agent'); + + // Bluesky posts are limited to 300 graphemes + // Format a concise post with title and truncated body + const maxLength = 280; // Leave room for ellipsis + const fullText = `${title}\n\n${body}`; + + let postText: string; + if (fullText.length <= maxLength) { + postText = fullText; + } else { + // Truncate at word boundary + const truncated = fullText.substring(0, maxLength); + const lastSpace = truncated.lastIndexOf(' '); + postText = truncated.substring(0, lastSpace > 0 ? lastSpace : maxLength) + '...'; + } + + // Format the text as RichText to detect links, mentions, etc. + const rt = new RichText({ text: postText }); await rt.detectFacets(agent); - // Create the ATproto record + // Create the ATproto record using standard Bluesky post collection + // This works with OAuth scope 'atproto' without requiring granular permissions const response = await agent.api.com.atproto.repo.createRecord({ repo: userDid, - collection: 'com.ponderants.node', + collection: 'app.bsky.feed.post', record: { - $type: 'com.ponderants.node', - title, - body: rt.text, + $type: 'app.bsky.feed.post', + text: rt.text, facets: rt.facets, - links: links || [], createdAt, + // Add a tag to identify this as a Ponderants node + tags: ['ponderants-node'], }, }); - atp_uri = response.uri; - atp_cid = response.cid; + atp_uri = response.data.uri; + atp_cid = response.data.cid; + + console.log('[POST /api/nodes] ✓ Published to ATproto PDS as standard post:', atp_uri); } catch (error) { - console.error('ATproto write error:', error); + console.error('[POST /api/nodes] ATproto write error:', error); return NextResponse.json({ error: 'Failed to publish to PDS' }, { status: 500 }); } // --- Step 2: Generate AI Embedding (Cache) --- - let embedding: number[]; + // Embeddings are optional - used for vector search and 3D visualization + let embedding: number[] | undefined; try { embedding = await generateEmbedding(title + '\n' + body); + console.log('[POST /api/nodes] ✓ Generated embedding vector'); } catch (error) { - console.error('Embedding error:', error); - return NextResponse.json({ error: 'Failed to generate embedding' }, { status: 500 }); + console.warn('[POST /api/nodes] ⚠ Embedding generation failed (non-critical):', error); + // Continue without embedding - it's only needed for advanced features + embedding = undefined; } // --- Step 3: Write to App View Cache (SurrealDB) --- + // The cache is optional - the ATproto PDS is the source of truth try { - const db = await connectToDB(surrealJwt); + const db = await connectToDB(); // Create the node record in our cache. // The `user_did` field is set, satisfying the 'PERMISSIONS' // clause defined in the schema. 
- const newNode = await db.create('node', { + const nodeData: any = { user_did: userDid, atp_uri: atp_uri, title: title, body: body, // Store the raw text body - embedding: embedding, // coords_3d will be calculated later by UMAP - }); + }; + + // Only include embedding if it was successfully generated + if (embedding) { + nodeData.embedding = embedding; + } + + const newNode = await db.create('node', nodeData); // Handle linking if (links && links.length > 0) { @@ -120,11 +156,16 @@ export async function POST(request: NextRequest) { } } - return NextResponse.json(newNode); + console.log('[POST /api/nodes] ✓ Cached node in SurrealDB'); + return NextResponse.json({ success: true, atp_uri, node: newNode }); } catch (error) { - console.error('SurrealDB write error:', error); - // TODO: Implement rollback for the ATproto post? - // This is a known limitation of the write-through cache pattern. - return NextResponse.json({ error: 'Failed to save to app cache' }, { status: 500 }); + console.warn('[POST /api/nodes] ⚠ SurrealDB cache write failed (non-critical):', error); + // The node was successfully published to ATproto (source of truth) + // Cache failure is non-critical - advanced features may be unavailable + return NextResponse.json({ + success: true, + atp_uri, + warning: 'Node published to Bluesky, but cache update failed. Advanced features may be unavailable.', + }); } } diff --git a/app/api/suggest-links/route.ts b/app/api/suggest-links/route.ts index 5085924..3d616a2 100644 --- a/app/api/suggest-links/route.ts +++ b/app/api/suggest-links/route.ts @@ -2,6 +2,7 @@ import { NextRequest, NextResponse } from 'next/server'; import { cookies } from 'next/headers'; import { connectToDB } from '@/lib/db'; import { generateEmbedding } from '@/lib/ai'; +import { verifySurrealJwt } from '@/lib/auth/jwt'; /** * POST /api/suggest-links @@ -18,6 +19,14 @@ export async function POST(request: NextRequest) { return NextResponse.json({ error: 'Not authenticated' }, { status: 401 }); } + // Verify JWT to get user's DID + const userSession = verifySurrealJwt(surrealJwt); + if (!userSession) { + return NextResponse.json({ error: 'Invalid auth token' }, { status: 401 }); + } + + const { did: userDid } = userSession; + const { body } = (await request.json()) as { body: string }; if (!body) { @@ -28,15 +37,13 @@ export async function POST(request: NextRequest) { // 1. Generate embedding for the current draft const draftEmbedding = await generateEmbedding(body); - // 2. Connect to DB (as the user) - // This enforces row-level security - user can only search their own nodes - const db = await connectToDB(surrealJwt); + // 2. Connect to DB with root credentials + const db = await connectToDB(); // 3. Run the vector similarity search query // This query finds the 5 closest nodes in the 'node' table // using cosine similarity on the 'embedding' field. - // It only searches nodes WHERE user_did = $token.did, - // which is enforced by the table's PERMISSIONS. + // We filter by user_did to ensure users only see their own nodes. const query = ` SELECT id, @@ -45,6 +52,7 @@ export async function POST(request: NextRequest) { atp_uri, vector::similarity::cosine(embedding, $draft_embedding) AS score FROM node + WHERE user_did = $user_did ORDER BY score DESC LIMIT 5; `; @@ -57,6 +65,7 @@ export async function POST(request: NextRequest) { score: number; }>]>(query, { draft_embedding: draftEmbedding, + user_did: userDid, }); // The query returns an array of result sets. We want the first one. 
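Note: the suggest-links route above returns the top five cosine-similarity matches for a draft body, scoped to the caller's `user_did`. Below is a minimal client-side sketch of consuming it. The `/api/suggest-links` path, the `{ body }` request payload, and cookie-based auth come from the route in this diff; the `suggestions` response key and the `LinkSuggestion` type name are assumptions for illustration only, not confirmed by the diff.

```ts
// Hedged sketch of a client helper for the suggest-links endpoint.
// Fields mirror the SELECT in app/api/suggest-links/route.ts; the
// `suggestions` response key is an assumed shape.
interface LinkSuggestion {
  id: string;       // SurrealDB record id of a candidate node
  title: string;
  atp_uri: string;  // ATproto URI of the published node
  score: number;    // cosine similarity; higher means more related
}

async function fetchLinkSuggestions(draftBody: string): Promise<LinkSuggestion[]> {
  const res = await fetch('/api/suggest-links', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    credentials: 'include', // send the ponderants-auth cookie
    body: JSON.stringify({ body: draftBody }),
  });
  if (!res.ok) {
    throw new Error(`suggest-links failed with status ${res.status}`);
  }
  const data = await res.json();
  return (data.suggestions ?? []) as LinkSuggestion[];
}
```

A node editor could call a helper like this on a debounced keystroke and surface the highest-scoring nodes as link candidates while the user drafts.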
diff --git a/app/api/tts/route.ts b/app/api/tts/route.ts new file mode 100644 index 0000000..a3b4111 --- /dev/null +++ b/app/api/tts/route.ts @@ -0,0 +1,83 @@ +import { NextRequest, NextResponse } from 'next/server'; +import { createClient } from '@deepgram/sdk'; + +/** + * Text-to-Speech API route using Deepgram Aura + * + * Converts text to natural-sounding speech using Deepgram's Aura-2 model. + * Returns audio data that can be played in the browser. + */ +export async function POST(request: NextRequest) { + const deepgramApiKey = process.env.DEEPGRAM_API_KEY; + + if (!deepgramApiKey) { + return NextResponse.json( + { error: 'Deepgram API key not configured' }, + { status: 500 } + ); + } + + try { + const { text } = await request.json(); + + if (!text || typeof text !== 'string') { + return NextResponse.json( + { error: 'Text parameter is required' }, + { status: 400 } + ); + } + + console.log('[TTS] Generating speech for text:', text.substring(0, 50) + '...'); + + const deepgram = createClient(deepgramApiKey); + + // Generate speech using Deepgram Aura + const response = await deepgram.speak.request( + { text }, + { + model: 'aura-2-thalia-en', // Natural female voice + encoding: 'linear16', + container: 'wav', + } + ); + + // Get the audio stream + const stream = await response.getStream(); + + if (!stream) { + throw new Error('No audio stream returned from Deepgram'); + } + + // Convert stream to buffer + const chunks: Uint8Array[] = []; + const reader = stream.getReader(); + + try { + while (true) { + const { done, value } = await reader.read(); + if (done) break; + if (value) chunks.push(value); + } + } finally { + reader.releaseLock(); + } + + const buffer = Buffer.concat(chunks); + + console.log('[TTS] ✓ Generated', buffer.length, 'bytes of audio'); + + // Return audio with proper headers + return new NextResponse(buffer, { + headers: { + 'Content-Type': 'audio/wav', + 'Content-Length': buffer.length.toString(), + }, + }); + } catch (error) { + console.error('[TTS] Error generating speech:', error); + return NextResponse.json( + { error: 'Failed to generate speech' }, + { status: 500 } + ); + } +} diff --git a/app/chat/page.tsx b/app/chat/page.tsx index 7757f29..4e9d626 100644 --- a/app/chat/page.tsx +++ b/app/chat/page.tsx @@ -12,25 +12,116 @@ import { Group, Text, Loader, - ActionIcon, Tooltip, } from '@mantine/core'; -import { useRef, useState, useEffect } from 'react'; -import { MicrophoneRecorder } from '@/components/MicrophoneRecorder'; +import { useRef, useEffect, useState } from 'react'; +import { IconVolume, IconMicrophone, IconNotes } from '@tabler/icons-react'; import { UserMenu } from '@/components/UserMenu'; +import { useVoiceMode } from '@/hooks/useVoiceMode'; +import { useAppMachine } from '@/hooks/useAppMachine'; +import { notifications } from '@mantine/notifications'; +import { useMediaQuery } from '@mantine/hooks'; + +/** + * Get the voice button text based on the current state + */ +function getVoiceButtonText(state: any): string { + if (state.matches('idle')) { + return 'Start Voice Conversation'; + } else if (state.matches('checkingForGreeting')) { + return 'Checking for greeting...'; + } else if (state.matches('listening')) { + return 'Listening... Start speaking'; + } else if (state.matches('userSpeaking')) { + return 'Speaking... (will auto-submit after 3s silence)'; + } else if (state.matches('timingOut')) { + return 'Speaking... 
(auto-submits soon)'; + } else if (state.matches('submittingUser')) { + return 'Submitting...'; + } else if (state.matches('waitingForAI')) { + return 'Waiting for AI...'; + } else if (state.matches('generatingTTS')) { + return 'Generating speech...'; + } else if (state.matches('playingTTS')) { + return 'AI is speaking...'; + } + return 'Start Voice Conversation'; +} export default function ChatPage() { const viewport = useRef(null); + const { messages, sendMessage, setMessages, status } = useChat(); + const isMobile = useMediaQuery('(max-width: 768px)'); + + // Text input state (managed manually since useChat doesn't provide form helpers) const [input, setInput] = useState(''); - const { messages, sendMessage, setMessages, status } = useChat({ - api: '/api/chat', - body: { - persona: 'Socratic', + // App machine for navigation + const appActor = useAppMachine(); + + // State for creating node + const [isCreatingNode, setIsCreatingNode] = useState(false); + + // Use the clean voice mode hook + const { state, send, transcript, error } = useVoiceMode({ + messages, + status, + onSubmit: (text: string) => { + sendMessage({ text }); }, - credentials: 'include', }); + // Handler to create node from conversation + const handleCreateNode = async () => { + if (messages.length === 0) { + notifications.show({ + title: 'No conversation', + message: 'Start a conversation before creating a node', + color: 'red', + }); + return; + } + + setIsCreatingNode(true); + + try { + const response = await fetch('/api/generate-node-draft', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + credentials: 'include', // Include cookies for authentication + body: JSON.stringify({ messages }), + }); + + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.error || 'Failed to generate node draft'); + } + + const { draft } = await response.json(); + + // Transition to edit mode with the draft + appActor.send({ + type: 'CREATE_NODE_FROM_CONVERSATION', + draft, + }); + + notifications.show({ + title: 'Node draft created', + message: 'Review and edit your node before publishing', + color: 'green', + }); + } catch (error) { + console.error('[Create Node] Error:', error); + notifications.show({ + title: 'Error', + message: error instanceof Error ? error.message : 'Failed to create node draft', + color: 'red', + }); + } finally { + setIsCreatingNode(false); + } + }; + // Add initial greeting message on first load useEffect(() => { if (messages.length === 0) { @@ -44,7 +135,7 @@ export default function ChatPage() { text: 'Welcome to Ponderants! I\'m here to help you explore and structure your ideas through conversation.\n\nWhat would you like to talk about today? I can adapt my interview style to best suit your needs (Socratic questioning, collaborative brainstorming, or other approaches).\n\nJust start sharing your thoughts, and we\'ll discover meaningful insights together.', }, ], - }, + } as any, ]); } }, []); @@ -57,16 +148,7 @@ export default function ChatPage() { }); }, [messages]); - const handleSubmit = (e: React.FormEvent) => { - e.preventDefault(); - if (!input.trim() || status === 'submitted' || status === 'streaming') return; - - sendMessage({ text: input }); - setInput(''); - }; - const handleNewConversation = () => { - // Clear all messages and reset to initial greeting setMessages([ { id: 'initial-greeting', @@ -77,35 +159,65 @@ export default function ChatPage() { text: 'Welcome to Ponderants! 
I\'m here to help you explore and structure your ideas through conversation.\n\nWhat would you like to talk about today? I can adapt my interview style to best suit your needs (Socratic questioning, collaborative brainstorming, or other approaches).\n\nJust start sharing your thoughts, and we\'ll discover meaningful insights together.', }, ], - }, + } as any, ]); }; - return ( - - - - Ponderants Interview - - - - - - - - + const isVoiceActive = !state.matches('idle'); + const canSkipAudio = state.hasTag('canSkipAudio'); - + {/* Fixed Header */} + + + + Convo + {!isMobile && ( + + + + + + + + + + )} + + + + + {/* Scrollable Messages Area */} + {messages.map((m) => ( - {m.role === 'user' ? 'You' : 'AI'} - {m.parts.map((part, i) => { - if (part.type === 'text') { - return ( - - {part.text} - - ); + + {m.role === 'user' ? 'You' : 'AI'} + + {(() => { + if ('parts' in m && Array.isArray((m as any).parts)) { + return (m as any).parts.map((part: any, i: number) => { + if (part.type === 'text') { + return ( + + {part.text} + + ); + } + return null; + }); } - - // Handle tool calls (e.g., suggest_node) - if (part.type === 'tool-call') { - return ( - - - 💡 Node Suggestion - - {part.args.title} - - {part.args.content} - - {part.args.tags && part.args.tags.length > 0 && ( - - {part.args.tags.map((tag: string, tagIdx: number) => ( - - #{tag} - - ))} - - )} - - ); - } - - return null; - })} + return Message content unavailable; + })()} ))} - {/* Typing indicator while AI is generating a response */} + {/* Typing indicator */} {(status === 'submitted' || status === 'streaming') && ( - AI + + AI + - Thinking... + + Thinking... + )} + + {/* Show current transcript while speaking */} + {transcript && (state.matches('userSpeaking') || state.matches('timingOut')) && ( + + + You (speaking...) + + {transcript} + + )} -
- - - setInput(e.currentTarget.value)} - placeholder="Speak or type your thoughts..." - style={{ flex: 1 }} - styles={{ - input: { - paddingLeft: '1rem', - paddingRight: '0.5rem', - }, - }} - variant="unstyled" - disabled={status === 'submitted' || status === 'streaming'} - /> - - {/* Microphone Recorder */} - { - setInput(transcript); - }} - onTranscriptFinalized={(transcript) => { - setInput(transcript); - setTimeout(() => { - const form = document.querySelector('form'); - if (form) { - form.requestSubmit(); - } - }, 100); - }} - /> - + {/* Fixed Voice Mode Controls */} + + + + + {/* Main Voice Button */} + + {/* Skip Button */} + {canSkipAudio && ( + + )} - - + + {/* Development Test Controls */} + {process.env.NODE_ENV === 'development' && ( + + + + DEV: State Machine Testing + + + State: {JSON.stringify(state.value)} | Tags: {Array.from(state.tags).join(', ')} + + + + + + + + + + )} + + {/* Text Input */} +
{ + e.preventDefault(); + if (input.trim() && !isVoiceActive) { + sendMessage({ text: input }); + setInput(''); + } + }} + > + + setInput(e.currentTarget.value)} + placeholder="Or type your thoughts here..." + style={{ flex: 1 }} + variant="filled" + disabled={isVoiceActive} + /> + + +
+ + {/* Error Display */} + {error && ( + + Error: {error} + + )} + +
+ ); } diff --git a/app/chat/page.tsx.backup b/app/chat/page.tsx.backup new file mode 100644 index 0000000..dd408cd --- /dev/null +++ b/app/chat/page.tsx.backup @@ -0,0 +1,664 @@ +'use client'; + +import { useChat } from '@ai-sdk/react'; +import { + Stack, + TextInput, + Button, + Paper, + ScrollArea, + Title, + Container, + Group, + Text, + Loader, + ActionIcon, + Tooltip, +} from '@mantine/core'; +import { useRef, useState, useEffect, useCallback } from 'react'; +import { IconVolume, IconMicrophone, IconMicrophoneOff } from '@tabler/icons-react'; +import { UserMenu } from '@/components/UserMenu'; + +// Define the shape of the Deepgram transcript +interface DeepgramTranscript { + channel: { + alternatives: Array<{ + transcript: string; + }>; + }; + is_final: boolean; + speech_final: boolean; +} + +type VoiceState = 'idle' | 'listening' | 'user-speaking' | 'processing' | 'ai-speaking'; + +export default function ChatPage() { + const viewport = useRef(null); + const [input, setInput] = useState(''); + const [voiceState, setVoiceState] = useState('idle'); + const [countdown, setCountdown] = useState(3); + const [isGeneratingSpeech, setIsGeneratingSpeech] = useState(false); + const lastSpokenMessageId = useRef(null); + const audioRef = useRef(null); + const mediaRecorderRef = useRef(null); + const socketRef = useRef(null); + const transcriptRef = useRef(''); + const silenceTimeoutRef = useRef(null); + const silenceStartTimeRef = useRef(null); + const countdownIntervalRef = useRef(null); + const hasStartedSpeakingRef = useRef(false); + + const { messages, sendMessage, setMessages, status } = useChat({ + api: '/api/chat', + body: { + persona: 'Socratic', + }, + credentials: 'include', + }); + + // Handle AI response in voice conversation mode + useEffect(() => { + if (voiceState !== 'processing') return; + + console.log('[Voice Mode] Effect running - voiceState: processing, status:', status, 'messages:', messages.length); + + // Wait until the AI response is complete (status returns to 'ready') + if (status !== 'ready') { + console.log('[Voice Mode] Waiting for status to be ready, current:', status); + return; + } + + // Find the latest assistant message + console.log('[Voice Mode] All messages:', messages.map(m => ({ role: m.role, id: m.id, preview: m.parts[0]?.text?.substring(0, 30) }))); + + const lastAssistantMessage = [...messages] + .reverse() + .find((m) => m.role === 'assistant'); + + if (!lastAssistantMessage) { + console.log('[Voice Mode] No assistant message found'); + return; + } + + console.log('[Voice Mode] Selected message ID:', lastAssistantMessage.id); + console.log('[Voice Mode] Selected message text preview:', lastAssistantMessage.parts.find(p => p.type === 'text')?.text?.substring(0, 50)); + console.log('[Voice Mode] Last spoken message ID:', lastSpokenMessageId.current); + + // Skip if we've already spoken this message + if (lastSpokenMessageId.current === lastAssistantMessage.id) { + console.log('[Voice Mode] Already spoke this message, skipping'); + return; + } + + // Extract text from the message + const textPart = lastAssistantMessage.parts.find((p) => p.type === 'text'); + if (!textPart || !textPart.text) { + console.log('[Voice Mode] No text part found in message'); + return; + } + + // Play the audio and transition to ai-speaking state + console.log('[Voice Mode] Transitioning to ai-speaking, will play audio'); + setVoiceState('ai-speaking'); + playAudio(textPart.text, lastAssistantMessage.id); + }, [messages, voiceState, status]); + + const playAudio = async (text: 
string, messageId: string) => { + try { + console.log('[Voice Mode] Generating speech for message:', messageId); + setIsGeneratingSpeech(true); + + const response = await fetch('/api/tts', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ text }), + }); + + if (!response.ok) { + throw new Error('Failed to generate speech'); + } + + const audioBlob = await response.blob(); + const audioUrl = URL.createObjectURL(audioBlob); + + // Create or reuse audio element + if (!audioRef.current) { + audioRef.current = new Audio(); + } + + audioRef.current.src = audioUrl; + audioRef.current.onended = () => { + URL.revokeObjectURL(audioUrl); + console.log('[Voice Mode] ✓ Finished playing audio, starting new listening session'); + lastSpokenMessageId.current = messageId; + setIsGeneratingSpeech(false); + + // After AI finishes speaking, go back to listening for user + startListening(); + }; + + audioRef.current.onerror = () => { + URL.revokeObjectURL(audioUrl); + console.error('[Voice Mode] Error playing audio'); + setIsGeneratingSpeech(false); + // On error, also go back to listening + startListening(); + }; + + await audioRef.current.play(); + console.log('[Voice Mode] ✓ Playing audio'); + setIsGeneratingSpeech(false); // Audio is now playing + } catch (error) { + console.error('[Voice Mode] Error:', error); + setIsGeneratingSpeech(false); + // On error, go back to listening + startListening(); + } + }; + + const submitUserInput = useCallback(() => { + // Clear any pending silence timeout and countdown + if (silenceTimeoutRef.current) { + clearTimeout(silenceTimeoutRef.current); + silenceTimeoutRef.current = null; + } + if (countdownIntervalRef.current) { + clearInterval(countdownIntervalRef.current); + countdownIntervalRef.current = null; + } + silenceStartTimeRef.current = null; + setCountdown(3); + + // Stop recording + if (mediaRecorderRef.current) { + mediaRecorderRef.current.stop(); + mediaRecorderRef.current = null; + } + if (socketRef.current) { + socketRef.current.close(); + socketRef.current = null; + } + + // Reset speaking flag + hasStartedSpeakingRef.current = false; + + // Send the transcript as a message if we have one + if (transcriptRef.current.trim()) { + console.log('[Voice Mode] Submitting transcript:', transcriptRef.current); + setInput(transcriptRef.current); + setVoiceState('processing'); + + setTimeout(() => { + const form = document.querySelector('form'); + if (form) { + console.log('[Voice Mode] Form found, submitting...'); + form.requestSubmit(); + } else { + console.error('[Voice Mode] Form not found!'); + } + }, 100); + } else { + // If no transcript, go back to listening + console.log('[Voice Mode] No transcript to submit, going back to listening'); + startListening(); + } + + transcriptRef.current = ''; + }, []); + + const startListening = useCallback(async () => { + transcriptRef.current = ''; + setInput(''); + hasStartedSpeakingRef.current = false; + // DON'T reset lastSpokenMessageId here - we need it to track what we've already spoken + silenceStartTimeRef.current = null; + setCountdown(3); + setVoiceState('listening'); + + try { + // 1. Get the Deepgram API key + const response = await fetch('/api/voice-token', { method: 'POST' }); + const data = await response.json(); + + if (data.error) { + throw new Error(data.error); + } + + const { key } = data; + + // 2. Access the microphone + const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); + + // 3. 
Open direct WebSocket to Deepgram with voice activity detection + const socket = new WebSocket( + 'wss://api.deepgram.com/v1/listen?interim_results=true&punctuate=true&vad_events=true', + ['token', key] + ); + socketRef.current = socket; + + socket.onopen = () => { + console.log('[Voice Mode] ✓ WebSocket connected, listening for speech...'); + + // 4. Create MediaRecorder + const mediaRecorder = new MediaRecorder(stream, { + mimeType: 'audio/webm', + }); + mediaRecorderRef.current = mediaRecorder; + + // 5. Send audio chunks on data available + mediaRecorder.ondataavailable = (event) => { + if (event.data.size > 0 && socket.readyState === WebSocket.OPEN) { + socket.send(event.data); + } + }; + + // Start recording and chunking audio every 250ms + mediaRecorder.start(250); + }; + + // 6. Receive transcripts and handle silence detection + socket.onmessage = (event) => { + const data = JSON.parse(event.data) as DeepgramTranscript; + + // Check if this message has alternatives (some Deepgram messages don't) + if (!data.channel?.alternatives) { + return; // Skip non-transcript messages (metadata, VAD events, etc.) + } + + const transcript = data.channel.alternatives[0]?.transcript || ''; + + if (transcript) { + // User has started speaking + if (!hasStartedSpeakingRef.current) { + console.log('[Voice Mode] User started speaking'); + hasStartedSpeakingRef.current = true; + setVoiceState('user-speaking'); + } + + // Clear any existing silence timeout and countdown + if (silenceTimeoutRef.current) { + clearTimeout(silenceTimeoutRef.current); + silenceTimeoutRef.current = null; + } + if (countdownIntervalRef.current) { + clearInterval(countdownIntervalRef.current); + countdownIntervalRef.current = null; + } + silenceStartTimeRef.current = null; + setCountdown(3); + + // Handle transcript updates + if (data.is_final) { + // This is a finalized phrase - append it to our transcript + transcriptRef.current = transcriptRef.current + ? transcriptRef.current + ' ' + transcript + : transcript; + setInput(transcriptRef.current); + console.log('[Voice Mode] Finalized phrase:', transcript); + + // Start a generous 3-second silence timer after each finalized phrase + silenceStartTimeRef.current = Date.now(); + + // Update countdown every 100ms + countdownIntervalRef.current = setInterval(() => { + if (silenceStartTimeRef.current) { + const elapsed = Date.now() - silenceStartTimeRef.current; + const remaining = Math.max(0, 3 - elapsed / 1000); + setCountdown(remaining); + } + }, 100); + + silenceTimeoutRef.current = setTimeout(() => { + console.log('[Voice Mode] 3 seconds of silence detected, submitting...'); + submitUserInput(); + }, 3000); + } else { + // This is an interim result - show it temporarily + const displayText = transcriptRef.current + ? 
transcriptRef.current + ' ' + transcript + : transcript; + setInput(displayText); + } + } + }; + + socket.onclose = () => { + // Clean up stream + stream.getTracks().forEach((track) => track.stop()); + console.log('[Voice Mode] WebSocket closed'); + }; + + socket.onerror = (err) => { + console.error('[Voice Mode] WebSocket error:', err); + setVoiceState('idle'); + }; + } catch (error) { + console.error('[Voice Mode] Error starting listening:', error); + setVoiceState('idle'); + } + }, [submitUserInput]); + + const skipAudioAndListen = useCallback(() => { + console.log('[Voice Mode] Skipping audio playback'); + + // Stop current audio + if (audioRef.current) { + audioRef.current.pause(); + audioRef.current.currentTime = 0; + } + + setIsGeneratingSpeech(false); + + // Go straight to listening + startListening(); + }, [startListening]); + + const exitVoiceMode = useCallback(() => { + // Clear any timeouts and intervals + if (silenceTimeoutRef.current) { + clearTimeout(silenceTimeoutRef.current); + silenceTimeoutRef.current = null; + } + if (countdownIntervalRef.current) { + clearInterval(countdownIntervalRef.current); + countdownIntervalRef.current = null; + } + silenceStartTimeRef.current = null; + + // Stop recording + if (mediaRecorderRef.current) { + mediaRecorderRef.current.stop(); + mediaRecorderRef.current = null; + } + if (socketRef.current) { + socketRef.current.close(); + socketRef.current = null; + } + + // Stop audio playback + if (audioRef.current) { + audioRef.current.pause(); + audioRef.current = null; + } + + hasStartedSpeakingRef.current = false; + lastSpokenMessageId.current = null; + transcriptRef.current = ''; + setInput(''); + setCountdown(3); + setIsGeneratingSpeech(false); + setVoiceState('idle'); + console.log('[Voice Mode] Exited voice conversation mode'); + }, []); + + const handleToggleVoiceMode = useCallback(() => { + if (voiceState === 'idle') { + // Start voice conversation mode + // First, check if there's a recent AI message to read out + const lastAssistantMessage = [...messages] + .reverse() + .find((m) => m.role === 'assistant'); + + if (lastAssistantMessage) { + // Extract text from the message + const textPart = lastAssistantMessage.parts.find((p) => p.type === 'text'); + + if (textPart && textPart.text) { + // Play the most recent AI message first, then start listening + console.log('[Voice Mode] Starting voice mode, reading most recent AI message first'); + setVoiceState('ai-speaking'); + playAudio(textPart.text, lastAssistantMessage.id); + return; + } + } + + // No AI message to read, just start listening + startListening(); + } else { + // Exit voice conversation mode + exitVoiceMode(); + } + }, [voiceState, startListening, exitVoiceMode, messages]); + + // Add initial greeting message on first load + useEffect(() => { + if (messages.length === 0) { + setMessages([ + { + id: 'initial-greeting', + role: 'assistant', + parts: [ + { + type: 'text', + text: 'Welcome to Ponderants! I\'m here to help you explore and structure your ideas through conversation.\n\nWhat would you like to talk about today? 
I can adapt my interview style to best suit your needs (Socratic questioning, collaborative brainstorming, or other approaches).\n\nJust start sharing your thoughts, and we\'ll discover meaningful insights together.', + }, + ], + }, + ]); + } + }, []); + + // Auto-scroll to bottom + useEffect(() => { + viewport.current?.scrollTo({ + top: viewport.current.scrollHeight, + behavior: 'smooth', + }); + }, [messages]); + + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault(); + if (!input.trim() || status === 'submitted' || status === 'streaming') return; + + sendMessage({ text: input }); + setInput(''); + }; + + const handleNewConversation = () => { + // Clear all messages and reset to initial greeting + setMessages([ + { + id: 'initial-greeting', + role: 'assistant', + parts: [ + { + type: 'text', + text: 'Welcome to Ponderants! I\'m here to help you explore and structure your ideas through conversation.\n\nWhat would you like to talk about today? I can adapt my interview style to best suit your needs (Socratic questioning, collaborative brainstorming, or other approaches).\n\nJust start sharing your thoughts, and we\'ll discover meaningful insights together.', + }, + ], + }, + ]); + }; + + return ( + + + + Ponderants Interview + + + + + + + + + + + + {messages.map((m) => ( + + {m.role === 'user' ? 'You' : 'AI'} + {m.parts.map((part, i) => { + if (part.type === 'text') { + return ( + + {part.text} + + ); + } + + // Handle tool calls (e.g., suggest_node) + if (part.type === 'tool-call') { + return ( + + + 💡 Node Suggestion + + {part.args.title} + + {part.args.content} + + {part.args.tags && part.args.tags.length > 0 && ( + + {part.args.tags.map((tag: string, tagIdx: number) => ( + + #{tag} + + ))} + + )} + + ); + } + + return null; + })} + + ))} + + {/* Typing indicator while AI is generating a response */} + {(status === 'submitted' || status === 'streaming') && ( + + AI + + + Thinking... + + + )} + + + + + {/* Big Voice Mode Button - shown above text input */} + + + + + + {/* Skip button - only shown when AI is speaking */} + {voiceState === 'ai-speaking' && ( + + )} + + + {/* Text Input - always available */} +
+ + setInput(e.currentTarget.value)} + placeholder="Or type your thoughts here..." + style={{ flex: 1 }} + variant="filled" + disabled={voiceState !== 'idle'} + /> + + +
+
+
+
+ ); +} diff --git a/app/chat/page.tsx.old b/app/chat/page.tsx.old new file mode 100644 index 0000000..ddd17e9 --- /dev/null +++ b/app/chat/page.tsx.old @@ -0,0 +1,814 @@ +'use client'; + +import { useChat } from '@ai-sdk/react'; +import { + Stack, + TextInput, + Button, + Paper, + ScrollArea, + Title, + Container, + Group, + Text, + Loader, + ActionIcon, + Tooltip, +} from '@mantine/core'; +import { useRef, useState, useEffect, useCallback } from 'react'; +import { IconVolume, IconMicrophone, IconMicrophoneOff } from '@tabler/icons-react'; +import { createActor } from 'xstate'; +import { useSelector } from '@xstate/react'; +import { appMachine } from '@/lib/app-machine'; +import { UserMenu } from '@/components/UserMenu'; + +// Define the shape of the Deepgram transcript +interface DeepgramTranscript { + channel: { + alternatives: Array<{ + transcript: string; + }>; + }; + is_final: boolean; + speech_final: boolean; +} + +/** + * Get the voice button text based on the current state tags. + * This replaces complex nested ternaries with a clean, readable function. + */ +function getVoiceButtonText( + state: ReturnType>, + silenceStartTime: number | null +): string { + // Check tags in priority order and return appropriate text + let buttonText: string; + + if (state.hasTag('textMode') || state.hasTag('voiceIdle')) { + buttonText = 'Start Voice Conversation'; + } else if (state.hasTag('listening')) { + buttonText = 'Listening... Start speaking'; + } else if (state.hasTag('userSpeaking')) { + buttonText = 'Speaking... (will auto-submit after 3s silence)'; + } else if (state.hasTag('timingOut')) { + if (silenceStartTime) { + const elapsed = Date.now() - silenceStartTime; + const remaining = Math.max(0, 3 - elapsed / 1000); + buttonText = `Speaking... (auto-submits in ${remaining.toFixed(1)}s)`; + } else { + buttonText = 'Speaking... (timing out...)'; + } + } else if (state.hasTag('processing')) { + buttonText = 'Processing...'; + } else if (state.hasTag('aiGenerating')) { + buttonText = 'Generating speech...'; + } else if (state.hasTag('aiSpeaking')) { + buttonText = 'AI is speaking... Please wait'; + } else { + // Fallback (should never reach here if tags are properly defined) + buttonText = 'Start Voice Conversation'; + console.warn('[Voice Mode] No matching tag found, using fallback text. 
Active tags:', state.tags); + } + + console.log('[Voice Mode] Button text determined:', buttonText, 'Active tags:', Array.from(state.tags)); + return buttonText; +} + +export default function ChatPage() { + const viewport = useRef(null); + + // XState machine for voice mode state management + const [actorRef] = useState(() => createActor(appMachine).start()); + const state = useSelector(actorRef, (snapshot) => snapshot); + const send = actorRef.send.bind(actorRef); + + // Imperative refs for managing side effects + const audioRef = useRef(null); + const mediaRecorderRef = useRef(null); + const socketRef = useRef(null); + const silenceTimeoutRef = useRef(null); + const silenceStartTimeRef = useRef(null); + const countdownIntervalRef = useRef(null); + const shouldCancelAudioRef = useRef(false); // Flag to cancel pending audio operations + + const { messages, sendMessage, setMessages, status } = useChat(); + + // Extract text from message (handles v5 parts structure) + const getMessageText = (msg: any): string => { + if ('parts' in msg && Array.isArray(msg.parts)) { + const textPart = msg.parts.find((p: any) => p.type === 'text'); + return textPart?.text || ''; + } + return msg.content || ''; + }; + + // Handle AI response in voice conversation mode - SIMPLE VERSION + useEffect(() => { + if (!state.hasTag('processing')) return; + if (status !== 'ready') { + console.log('[Voice Mode] Waiting, status:', status); + return; + } + + const transcript = state.context.transcript?.trim(); + if (!transcript) return; + + console.log('[Voice Mode] === PROCESSING ==='); + console.log('[Voice Mode] Transcript:', transcript); + console.log('[Voice Mode] Messages:', messages.length); + + // Get last 2 messages + const lastMsg = messages[messages.length - 1]; + const secondLastMsg = messages[messages.length - 2]; + + console.log('[Voice Mode] Last msg:', lastMsg?.role, getMessageText(lastMsg || {}).substring(0, 30)); + console.log('[Voice Mode] 2nd last msg:', secondLastMsg?.role, getMessageText(secondLastMsg || {}).substring(0, 30)); + + // Case 1: User message not submitted yet + // Check if the last message is the user's transcript + const userMessageExists = messages.some(m => + m.role === 'user' && getMessageText(m) === transcript + ); + + if (!userMessageExists) { + console.log('[Voice Mode] → Submitting user message'); + submitUserInput(); + return; + } + + // Case 2: User message submitted, check if AI has responded + // After user submits, if AI responds, the new AI message is LAST + if (lastMsg && lastMsg.role === 'assistant' && + secondLastMsg && secondLastMsg.role === 'user' && + getMessageText(secondLastMsg) === transcript) { + + const aiMsg = lastMsg; + console.log('[Voice Mode] → AI response found:', aiMsg.id); + console.log('[Voice Mode] → Last spoken:', state.context.lastSpokenMessageId); + + // Only play if we haven't played this message yet + if (state.context.lastSpokenMessageId !== aiMsg.id) { + const text = getMessageText(aiMsg); + console.log('[Voice Mode] → Playing:', text.substring(0, 50) + '...'); + send({ type: 'AI_RESPONSE_READY', messageId: aiMsg.id, text }); + playAudio(text, aiMsg.id); + } else { + console.log('[Voice Mode] → Already played, skipping'); + } + return; + } + + // Case 3: Waiting for AI response + console.log('[Voice Mode] → Waiting for AI response...'); + }, [messages, state, status, send]); + + + // Stop all audio playback and cancel pending operations + const stopAllAudio = useCallback(() => { + console.log('[Voice Mode] Stopping all audio operations'); + + // 
Set cancel flag to prevent any pending audio from playing + shouldCancelAudioRef.current = true; + + // Stop and clean up audio element + if (audioRef.current) { + audioRef.current.pause(); + audioRef.current.currentTime = 0; + audioRef.current.src = ''; + } + }, []); + + const playAudio = async (text: string, messageId: string) => { + try { + // Reset cancel flag at the start of a new audio operation + shouldCancelAudioRef.current = false; + + console.log('[Voice Mode] Generating speech for message:', messageId); + console.log('[Voice Mode] State transition:', state.value); + + const response = await fetch('/api/tts', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ text }), + }); + + // Check if we should cancel before continuing + if (shouldCancelAudioRef.current) { + console.log('[Voice Mode] Audio generation canceled before blob creation'); + return; + } + + if (!response.ok) { + throw new Error('Failed to generate speech'); + } + + const audioBlob = await response.blob(); + + // Check again after async operation + if (shouldCancelAudioRef.current) { + console.log('[Voice Mode] Audio generation canceled after blob creation'); + return; + } + + const audioUrl = URL.createObjectURL(audioBlob); + + // Create or reuse audio element + if (!audioRef.current) { + audioRef.current = new Audio(); + } + + audioRef.current.src = audioUrl; + audioRef.current.onended = () => { + URL.revokeObjectURL(audioUrl); + console.log('[Voice Mode] ✓ Finished playing audio, sending TTS_FINISHED event'); + console.log('[Voice Mode] State transition:', state.value); + send({ type: 'TTS_FINISHED', messageId }); + + // After AI finishes speaking, go back to listening for user + startListening(); + }; + + audioRef.current.onerror = () => { + URL.revokeObjectURL(audioUrl); + console.error('[Voice Mode] Error playing audio'); + // On error, also go back to listening + startListening(); + }; + + // Final check before playing + if (shouldCancelAudioRef.current) { + console.log('[Voice Mode] Audio playback canceled before play()'); + URL.revokeObjectURL(audioUrl); + return; + } + + await audioRef.current.play(); + + // Only send TTS_PLAYING if we haven't been canceled + if (!shouldCancelAudioRef.current) { + console.log('[Voice Mode] ✓ Playing audio, sending TTS_PLAYING event'); + console.log('[Voice Mode] State transition:', state.value); + send({ type: 'TTS_PLAYING' }); + } else { + console.log('[Voice Mode] Audio playback canceled after play()'); + URL.revokeObjectURL(audioUrl); + } + } catch (error) { + console.error('[Voice Mode] Error:', error); + // On error, go back to listening + startListening(); + } + }; + + const submitUserInput = useCallback(() => { + // Clear any pending silence timeout and countdown + if (silenceTimeoutRef.current) { + clearTimeout(silenceTimeoutRef.current); + silenceTimeoutRef.current = null; + } + if (countdownIntervalRef.current) { + clearInterval(countdownIntervalRef.current); + countdownIntervalRef.current = null; + } + silenceStartTimeRef.current = null; + + // Stop recording + if (mediaRecorderRef.current) { + mediaRecorderRef.current.stop(); + mediaRecorderRef.current = null; + } + if (socketRef.current) { + socketRef.current.close(); + socketRef.current = null; + } + + // Send the transcript as a message if we have one + const transcript = state.context.transcript; + if (transcript.trim()) { + console.log('[Voice Mode] Submitting transcript:', transcript); + console.log('[Voice Mode] State transition:', state.value); + + 
setTimeout(() => { + const form = document.querySelector('form'); + if (form) { + console.log('[Voice Mode] Form found, submitting...'); + form.requestSubmit(); + } else { + console.error('[Voice Mode] Form not found!'); + } + }, 100); + } else { + // If no transcript, go back to listening + console.log('[Voice Mode] No transcript to submit, going back to listening'); + startListening(); + } + }, [state, send]); + + const startListening = useCallback(async () => { + silenceStartTimeRef.current = null; + + // Send event to enter listening state (which clears transcript/input/countdown) + console.log('[Voice Mode] Sending START_LISTENING event (implicitly via state transition)'); + console.log('[Voice Mode] State transition:', state.value); + + try { + // 1. Get the Deepgram API key + const response = await fetch('/api/voice-token', { method: 'POST' }); + const data = await response.json(); + + if (data.error) { + throw new Error(data.error); + } + + const { key } = data; + + // 2. Access the microphone + const stream = await navigator.mediaDevices.getUserMedia({ audio: true }); + + // 3. Open direct WebSocket to Deepgram with voice activity detection + const socket = new WebSocket( + 'wss://api.deepgram.com/v1/listen?interim_results=true&punctuate=true&vad_events=true', + ['token', key] + ); + socketRef.current = socket; + + socket.onopen = () => { + console.log('[Voice Mode] ✓ WebSocket connected, listening for speech...'); + console.log('[Voice Mode] State transition:', state.value); + + // 4. Create MediaRecorder + const mediaRecorder = new MediaRecorder(stream, { + mimeType: 'audio/webm', + }); + mediaRecorderRef.current = mediaRecorder; + + // 5. Send audio chunks on data available + mediaRecorder.ondataavailable = (event) => { + if (event.data.size > 0 && socket.readyState === WebSocket.OPEN) { + socket.send(event.data); + } + }; + + // Start recording and chunking audio every 250ms + mediaRecorder.start(250); + }; + + // 6. Receive transcripts and handle silence detection + socket.onmessage = (event) => { + const data = JSON.parse(event.data) as DeepgramTranscript; + + // Check if this message has alternatives (some Deepgram messages don't) + if (!data.channel?.alternatives) { + return; // Skip non-transcript messages (metadata, VAD events, etc.) 
+ } + + const transcript = data.channel.alternatives[0]?.transcript || ''; + + if (transcript) { + // User has started speaking + if (!state.context.hasStartedSpeaking) { + console.log('[Voice Mode] User started speaking, sending USER_STARTED_SPEAKING event'); + console.log('[Voice Mode] State transition:', state.value); + send({ type: 'USER_STARTED_SPEAKING' }); + } + + // Clear any existing silence timeout and countdown + if (silenceTimeoutRef.current) { + clearTimeout(silenceTimeoutRef.current); + silenceTimeoutRef.current = null; + } + if (countdownIntervalRef.current) { + clearInterval(countdownIntervalRef.current); + countdownIntervalRef.current = null; + } + silenceStartTimeRef.current = null; + + // Handle transcript updates + if (data.is_final) { + // This is a finalized phrase - send to machine + console.log('[Voice Mode] === FINALIZED PHRASE ==='); + console.log('[Voice Mode] Transcript:', transcript); + console.log('[Voice Mode] state.value BEFORE:', JSON.stringify(state.value)); + console.log('[Voice Mode] tags BEFORE:', Array.from(state.tags)); + console.log('[Voice Mode] context BEFORE:', JSON.stringify(state.context)); + console.log('[Voice Mode] Sending FINALIZED_PHRASE event'); + send({ type: 'FINALIZED_PHRASE', phrase: transcript }); + + // Start a generous 3-second silence timer after each finalized phrase + silenceStartTimeRef.current = Date.now(); + + // Update countdown every 100ms + countdownIntervalRef.current = setInterval(() => { + if (silenceStartTimeRef.current) { + const elapsed = Date.now() - silenceStartTimeRef.current; + const remaining = Math.max(0, 3 - elapsed / 1000); + // Note: countdown is now managed in machine context, but we need + // to update it frequently for UI display. This is acceptable as + // a UI-only side effect. + } + }, 100); + + silenceTimeoutRef.current = setTimeout(() => { + console.log('[Voice Mode] 3 seconds of silence detected, sending SILENCE_TIMEOUT event'); + console.log('[Voice Mode] State transition:', state.value); + send({ type: 'SILENCE_TIMEOUT' }); + // Note: submitUserInput will be called by the processing state effect + }, 3000); + } else { + // This is an interim result - update display (send TRANSCRIPT_UPDATE) + const currentTranscript = state.context.transcript; + const displayText = currentTranscript + ? 
currentTranscript + ' ' + transcript + : transcript; + send({ type: 'TRANSCRIPT_UPDATE', transcript: displayText }); + } + } + }; + + socket.onclose = () => { + // Clean up stream + stream.getTracks().forEach((track) => track.stop()); + console.log('[Voice Mode] WebSocket closed'); + console.log('[Voice Mode] State transition:', state.value); + }; + + socket.onerror = (err) => { + console.error('[Voice Mode] WebSocket error:', err); + console.log('[Voice Mode] State transition:', state.value); + // On error, toggle back to text mode if we're in voice mode + if (!state.hasTag('textMode')) { + send({ type: 'TOGGLE_VOICE_MODE' }); + } + }; + } catch (error) { + console.error('[Voice Mode] Error starting listening:', error); + console.log('[Voice Mode] State transition:', state.value); + // On error, toggle back to text mode if we're in voice mode + if (!state.hasTag('textMode')) { + send({ type: 'TOGGLE_VOICE_MODE' }); + } + } + }, [submitUserInput, state, send]); + + const skipAudioAndListen = useCallback(() => { + console.log('[Voice Mode] === SKIP BUTTON CLICKED ==='); + console.log('[Voice Mode] Current state.value:', JSON.stringify(state.value)); + console.log('[Voice Mode] Current tags:', Array.from(state.tags)); + + // Stop ALL audio operations + stopAllAudio(); + + // Send skip event + send({ type: 'SKIP_AUDIO' }); + + // Go straight to listening + startListening(); + }, [startListening, state, send, stopAllAudio]); + + const handleToggleVoiceMode = useCallback(() => { + console.log('[Voice Mode] Voice button pressed, sending TOGGLE_VOICE_MODE event'); + console.log('[Voice Mode] Current state:', state.value); + send({ type: 'TOGGLE_VOICE_MODE' }); + }, [state, send]); + + // Handle entering voice.idle state (after TOGGLE_VOICE_MODE from text mode) + useEffect(() => { + if (!state.hasTag('voiceIdle')) return; + + console.log('[Voice Mode] Entered voice.idle, checking for AI message to read'); + + // Get ALL assistant messages in order + const assistantMessages = messages.filter((m) => m.role === 'assistant'); + console.log('[Voice Mode] (idle) Found', assistantMessages.length, 'assistant messages'); + + if (assistantMessages.length === 0) { + console.log('[Voice Mode] (idle) No assistant messages, starting listening'); + send({ type: 'START_LISTENING' }); + startListening(); + return; + } + + // Get the LAST (most recent) assistant message + const latestAssistantMessage = assistantMessages[assistantMessages.length - 1]; + console.log('[Voice Mode] (idle) Latest message ID:', latestAssistantMessage.id); + console.log('[Voice Mode] (idle) Last spoken message ID:', state.context.lastSpokenMessageId); + + // Skip if we've already spoken this message + if (state.context.lastSpokenMessageId === latestAssistantMessage.id) { + console.log('[Voice Mode] (idle) Already spoke latest message, starting listening'); + send({ type: 'START_LISTENING' }); + startListening(); + return; + } + + // Extract text from the message + let text = ''; + if ('parts' in latestAssistantMessage && Array.isArray((latestAssistantMessage as any).parts)) { + const textPart = (latestAssistantMessage as any).parts.find((p: any) => p.type === 'text'); + text = textPart?.text || ''; + } + + if (text) { + // Play the most recent AI message first, then start listening + console.log('[Voice Mode] (idle) Reading latest AI message:', text.substring(0, 50) + '...'); + send({ type: 'AI_RESPONSE_READY', messageId: latestAssistantMessage.id, text }); + playAudio(text, latestAssistantMessage.id); + return; + } + + // No text found, 
just start listening + console.log('[Voice Mode] (idle) No text in latest message, starting listening'); + send({ type: 'START_LISTENING' }); + startListening(); + }, [state, messages, send]); + + // Stop audio when leaving audio-related states + useEffect(() => { + const isInAudioState = state.hasTag('canSkipAudio'); + + if (!isInAudioState) { + // We're not in an audio state, make sure everything is stopped + stopAllAudio(); + } + }, [state, stopAllAudio]); + + // Log state transitions for debugging + useEffect(() => { + console.log('[Voice Mode] === STATE TRANSITION ==='); + console.log('[Voice Mode] state.value:', JSON.stringify(state.value)); + console.log('[Voice Mode] Active tags:', Array.from(state.tags)); + console.log('[Voice Mode] Context:', JSON.stringify(state.context)); + }, [state.value]); + + // Add initial greeting message on first load + useEffect(() => { + if (messages.length === 0) { + setMessages([ + { + id: 'initial-greeting', + role: 'assistant', + parts: [ + { + type: 'text', + text: 'Welcome to Ponderants! I\'m here to help you explore and structure your ideas through conversation.\n\nWhat would you like to talk about today? I can adapt my interview style to best suit your needs (Socratic questioning, collaborative brainstorming, or other approaches).\n\nJust start sharing your thoughts, and we\'ll discover meaningful insights together.', + }, + ], + } as any, + ]); + } + }, []); + + // Auto-scroll to bottom + useEffect(() => { + viewport.current?.scrollTo({ + top: viewport.current.scrollHeight, + behavior: 'smooth', + }); + }, [messages]); + + const handleSubmit = (e: React.FormEvent) => { + e.preventDefault(); + const inputText = state.context.input; + if (!inputText.trim() || status === 'submitted' || status === 'streaming') return; + + console.log('[Voice Mode] Submitting message:', inputText); + console.log('[Voice Mode] State transition:', state.value); + + sendMessage({ text: inputText }); + // Clear input via machine context (will be cleared on next state transition) + }; + + const handleNewConversation = () => { + // Clear all messages and reset to initial greeting + setMessages([ + { + id: 'initial-greeting', + role: 'assistant', + parts: [ + { + type: 'text', + text: 'Welcome to Ponderants! I\'m here to help you explore and structure your ideas through conversation.\n\nWhat would you like to talk about today? I can adapt my interview style to best suit your needs (Socratic questioning, collaborative brainstorming, or other approaches).\n\nJust start sharing your thoughts, and we\'ll discover meaningful insights together.', + }, + ], + } as any, + ]); + }; + + return ( + + + + Ponderants Interview + + + + + + + + + + + + {messages.map((m) => ( + + {m.role === 'user' ? 'You' : 'AI'} + {/* Extract text from message parts */} + {(() => { + if ('parts' in m && Array.isArray((m as any).parts)) { + return (m as any).parts.map((part: any, i: number) => { + if (part.type === 'text') { + return ( + + {part.text} + + ); + } + return null; + }); + } + return Message content unavailable; + })()} + + ))} + + {/* Typing indicator while AI is generating a response */} + {(status === 'submitted' || status === 'streaming') && ( + + AI + + + Thinking... 
+ + + )} + + + + + {/* Big Voice Mode Button - shown above text input */} + + + + + + {/* Skip button - shown when audio can be skipped */} + {state.hasTag('canSkipAudio') && ( + + )} + + + {/* Test Controls - Development Only */} + {process.env.NODE_ENV === 'development' && ( + + + DEV: State Machine Testing + + State: {JSON.stringify(state.value)} | Tags: {Array.from(state.tags).join(', ')} + + + + + + + + + + + )} + + {/* Text Input - always available */} +
+ + send({ type: 'TRANSCRIPT_UPDATE', transcript: e.currentTarget.value })} + placeholder="Or type your thoughts here..." + style={{ flex: 1 }} + variant="filled" + disabled={!state.hasTag('textMode') && !state.hasTag('voiceIdle')} + /> + + +
+
+
+
+ ); +} diff --git a/app/edit/page.tsx b/app/edit/page.tsx new file mode 100644 index 0000000..7973d5b --- /dev/null +++ b/app/edit/page.tsx @@ -0,0 +1,302 @@ +'use client'; + +/** + * Edit Node Page + * + * Editor for reviewing and publishing node drafts generated from conversations. + * Displays the AI-generated draft and allows editing before publishing. + */ + +import { + Stack, + Title, + Text, + Paper, + TextInput, + Textarea, + Button, + Group, + Container, + Divider, + Checkbox, + Badge, + Loader, +} from '@mantine/core'; +import { useState, useEffect } from 'react'; +import { IconDeviceFloppy, IconX, IconRefresh } from '@tabler/icons-react'; +import { useAppMachine } from '@/hooks/useAppMachine'; +import { useSelector } from '@xstate/react'; +import { notifications } from '@mantine/notifications'; + +interface SuggestedNode { + id: string; + title: string; + body: string; + atp_uri: string; + score: number; +} + +export default function EditPage() { + const appActor = useAppMachine(); + const pendingDraft = useSelector(appActor, (state) => state.context.pendingNodeDraft); + + const [title, setTitle] = useState(''); + const [content, setContent] = useState(''); + const [isPublishing, setIsPublishing] = useState(false); + const [suggestedNodes, setSuggestedNodes] = useState([]); + const [selectedLinks, setSelectedLinks] = useState([]); + const [isLoadingSuggestions, setIsLoadingSuggestions] = useState(false); + + // Load draft when available + useEffect(() => { + if (pendingDraft) { + setTitle(pendingDraft.title); + setContent(pendingDraft.content); + } + }, [pendingDraft]); + + // Fetch link suggestions when content changes + const fetchLinkSuggestions = async () => { + if (!content.trim() || content.trim().length < 50) { + setSuggestedNodes([]); + return; + } + + setIsLoadingSuggestions(true); + try { + const response = await fetch('/api/suggest-links', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + credentials: 'include', + body: JSON.stringify({ body: content }), + }); + + if (!response.ok) { + throw new Error('Failed to fetch suggestions'); + } + + const suggestions = await response.json(); + setSuggestedNodes(suggestions); + } catch (error) { + console.error('[Link Suggestions] Error:', error); + } finally { + setIsLoadingSuggestions(false); + } + }; + + // Auto-fetch suggestions when content is substantial + useEffect(() => { + const timer = setTimeout(() => { + if (content.trim().length >= 50) { + fetchLinkSuggestions(); + } + }, 1000); // Debounce 1 second + + return () => clearTimeout(timer); + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [content]); // fetchLinkSuggestions is stable and doesn't need to be in deps + + const handlePublish = async () => { + if (!title.trim() || !content.trim()) { + notifications.show({ + title: 'Missing content', + message: 'Please provide both a title and content for your node', + color: 'red', + }); + return; + } + + setIsPublishing(true); + + try { + const response = await fetch('/api/nodes', { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + credentials: 'include', // Include cookies for authentication + body: JSON.stringify({ + title: title.trim(), + body: content.trim(), + links: selectedLinks, + }), + }); + + if (!response.ok) { + const errorData = await response.json(); + throw new Error(errorData.error || 'Failed to publish node'); + } + + const result = await response.json(); + + // Show success notification + const message = result.warning || 'Your node has been 
published to your Bluesky account'; + notifications.show({ + title: 'Node published!', + message, + color: result.warning ? 'yellow' : 'green', + }); + + // Transition back to conversation view + // (Galaxy view requires the cache, which may have failed) + appActor.send({ + type: 'CANCEL_EDIT', // Go back to conversation + }); + } catch (error) { + console.error('[Publish Node] Error:', error); + notifications.show({ + title: 'Error', + message: error instanceof Error ? error.message : 'Failed to publish node', + color: 'red', + }); + } finally { + setIsPublishing(false); + } + }; + + const handleCancel = () => { + if (pendingDraft) { + appActor.send({ type: 'CANCEL_EDIT' }); + } else { + // Manual node creation - go back to conversation + appActor.send({ type: 'NAVIGATE_TO_CONVO' }); + } + }; + + const toggleLinkSelection = (nodeId: string) => { + setSelectedLinks((prev) => + prev.includes(nodeId) + ? prev.filter((id) => id !== nodeId) + : [...prev, nodeId] + ); + }; + + return ( + + + + Edit Node + + + + + + + + + setTitle(e.currentTarget.value)} + size="lg" + required + /> + + + +