feat: Improve UI layout and navigation

- Increase logo size (48x48 desktop, 56x56 mobile) for better visibility
- Add logo as favicon
- Add logo to mobile header
- Move user menu to navigation bars (sidebar on desktop, bottom bar on mobile)
- Fix desktop chat layout - restructure containers so the voice controls are no longer cut off
- Fix mobile bottom bar - use icon-only ActionIcons instead of truncated text buttons
- Hide Create Node/New Conversation buttons on mobile to save header space
- Make the fixed header and voice controls work correctly within their containers

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit 0ed2d6c0b3 (parent 47b35b9caf)
2025-11-09 14:43:11 +00:00
57 changed files with 6996 additions and 629 deletions
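The layout bullets above come down to a fixed header bar, a fixed voice-control footer, and a scroll container padded to clear both. A minimal sketch of that pattern with Mantine, simplified from the chat page diff further down (the exact padding values and heights are taken from that diff; everything else is illustrative):

import type { ReactNode } from 'react';
import { Container, Paper, ScrollArea } from '@mantine/core';

export function FixedChromeLayout({ children }: { children: ReactNode }) {
  // Top/bottom padding on the container reserves room for the fixed bars,
  // so content is never hidden behind the header or the voice controls.
  return (
    <Container size="md" style={{ paddingTop: 80, paddingBottom: 300 }}>
      {/* Header pinned to the viewport, above scrolling content */}
      <Paper withBorder p="md" radius={0} style={{ position: 'fixed', top: 0, left: 0, right: 0, zIndex: 50 }}>
        Header
      </Paper>

      {/* Scrollable message area sized to leave room for header and footer */}
      <ScrollArea h="calc(100vh - 380px)">{children}</ScrollArea>

      {/* Voice controls pinned to the bottom so they are never clipped */}
      <Paper withBorder p="md" radius={0} style={{ position: 'fixed', bottom: 0, left: 0, right: 0, zIndex: 50 }}>
        Voice controls
      </Paper>
    </Container>
  );
}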


@@ -100,9 +100,11 @@ export async function GET(request: NextRequest) {
// Parse custom state to determine redirect URL
let returnTo = '/chat';
try {
const customState = JSON.parse(state);
if (customState.returnTo) {
returnTo = customState.returnTo;
if (state) {
const customState = JSON.parse(state);
if (customState.returnTo) {
returnTo = customState.returnTo;
}
}
} catch {
// Invalid state JSON, use default


@@ -50,7 +50,7 @@ export async function POST(request: NextRequest) {
if (error instanceof z.ZodError) {
return NextResponse.json(
{ error: 'Invalid request', details: error.errors },
{ error: 'Invalid request', details: error.issues },
{ status: 400 }
);
}
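For context, `ZodError` exposes its validation failures on the `issues` array; `errors` was an alias that newer Zod versions deprecate, which is all the one-line change above accounts for. A standalone illustration of the same pattern:

import { z } from 'zod';

const BodySchema = z.object({ text: z.string().min(1) });

try {
  BodySchema.parse({ text: '' }); // fails the min(1) check
} catch (error) {
  if (error instanceof z.ZodError) {
    // `issues` holds the structured failures: path, code, and a human-readable message for each.
    console.log(error.issues);
  }
}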


@@ -1,6 +1,8 @@
import { NextRequest, NextResponse } from 'next/server';
import { cookies } from 'next/headers';
import { UMAP } from 'umap-js';
import { connectToDB } from '@/lib/db';
import { verifySurrealJwt } from '@/lib/auth/jwt';
/**
* POST /api/calculate-graph
@@ -19,28 +21,16 @@ export async function POST(request: NextRequest) {
return NextResponse.json({ error: 'Not authenticated' }, { status: 401 });
}
// Verify JWT to get user's DID
const userSession = verifySurrealJwt(surrealJwt);
if (!userSession) {
return NextResponse.json({ error: 'Invalid auth token' }, { status: 401 });
}
const { did: userDid } = userSession;
try {
// NOTE: For the hackathon, we use root credentials instead of JWT auth for simplicity.
// In production, this should use user-scoped authentication with proper SCOPE configuration.
const db = new (await import('surrealdb')).default();
await db.connect(process.env.SURREALDB_URL!);
await db.signin({
username: process.env.SURREALDB_USER!,
password: process.env.SURREALDB_PASS!,
});
await db.use({
namespace: process.env.SURREALDB_NS!,
database: process.env.SURREALDB_DB!,
});
// Get the user's DID from the JWT to filter nodes
const jwt = require('jsonwebtoken');
const decoded = jwt.decode(surrealJwt) as { did: string };
const userDid = decoded?.did;
if (!userDid) {
return NextResponse.json({ error: 'Invalid authentication token' }, { status: 401 });
}
const db = await connectToDB();
// 1. Fetch all nodes that have an embedding but no coords_3d (filtered by user_did)
const query = `SELECT id, embedding FROM node WHERE user_did = $userDid AND embedding != NONE AND coords_3d = NONE`;
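Several routes in this commit now lean on shared helpers from `@/lib/db` and `@/lib/auth/jwt` instead of opening their own SurrealDB connection and hand-decoding the JWT. Those helpers are not part of this diff; a rough sketch of what they plausibly look like, reconstructed from the code this hunk removes and from how the helpers are called (the JWT secret env var name and exact return shapes are assumptions):

// lib/db.ts (hypothetical sketch)
import Surreal from 'surrealdb';

export async function connectToDB(): Promise<Surreal> {
  const db = new Surreal();
  await db.connect(process.env.SURREALDB_URL!);
  await db.signin({
    username: process.env.SURREALDB_USER!,
    password: process.env.SURREALDB_PASS!,
  });
  await db.use({
    namespace: process.env.SURREALDB_NS!,
    database: process.env.SURREALDB_DB!,
  });
  return db;
}

// lib/auth/jwt.ts (hypothetical sketch)
import jwt from 'jsonwebtoken';

export interface UserSession {
  did: string;
  handle: string;
}

export function verifySurrealJwt(token: string): UserSession | null {
  try {
    // Verifying (not just decoding) prevents a forged cookie from impersonating a user.
    // The secret env var name here is an assumption.
    return jwt.verify(token, process.env.JWT_SECRET!) as UserSession;
  } catch {
    return null;
  }
}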


@@ -53,10 +53,26 @@ For all other conversation, just respond as a helpful AI.`;
messages: convertToModelMessages(messages),
// Provide the schema as a 'tool' to the model
// Tools in AI SDK v5 use inputSchema instead of parameters
tools: {
suggest_node: {
description: 'Suggest a new thought node when an idea is complete.',
schema: NodeSuggestionSchema,
inputSchema: z.object({
title: z
.string()
.describe('A concise, descriptive title for the thought node.'),
content: z
.string()
.describe('The full, well-structured content of the thought node.'),
tags: z
.array(z.string())
.optional()
.describe('Optional tags for categorizing the node.'),
}),
execute: async ({ title, content, tags }) => ({
success: true,
suggestion: { title, content, tags },
}),
},
},
});
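Because old and new lines are interleaved in the hunk above, here is what the final AI SDK v5 tool definition boils down to: a `description`, a Zod `inputSchema` (v5's rename of `parameters`), and an `execute` handler. A trimmed sketch using the same model id, with the system prompt and conversation messages omitted:

import { google } from '@ai-sdk/google';
import { streamText, tool } from 'ai';
import { z } from 'zod';

const result = streamText({
  model: google('gemini-2.0-flash-exp'),
  prompt: 'Suggest a node about test-driven development.',
  tools: {
    suggest_node: tool({
      description: 'Suggest a new thought node when an idea is complete.',
      // AI SDK v5 uses `inputSchema` where v4 used `parameters`.
      inputSchema: z.object({
        title: z.string(),
        content: z.string(),
        tags: z.array(z.string()).optional(),
      }),
      // The result is streamed back to the client as a tool part.
      execute: async ({ title, content, tags }) => ({
        success: true,
        suggestion: { title, content, tags },
      }),
    }),
  },
});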


@@ -0,0 +1,62 @@
import { NextRequest, NextResponse } from 'next/server';
import { cookies } from 'next/headers';
import { connectToDB } from '@/lib/db';
import { verifySurrealJwt } from '@/lib/auth/jwt';
/**
* GET /api/debug/nodes
*
* Debug route to inspect node storage
*/
export async function GET(request: NextRequest) {
const cookieStore = await cookies();
const surrealJwt = cookieStore.get('ponderants-auth')?.value;
if (!surrealJwt) {
return NextResponse.json({ error: 'Not authenticated' }, { status: 401 });
}
const userSession = verifySurrealJwt(surrealJwt);
if (!userSession) {
return NextResponse.json({ error: 'Invalid auth token' }, { status: 401 });
}
const { did: userDid } = userSession;
try {
const db = await connectToDB();
// Get all nodes for this user
const nodesQuery = `
SELECT id, title, body, atp_uri, embedding, coords_3d
FROM node
WHERE user_did = $userDid
`;
const results = await db.query(nodesQuery, { userDid });
const nodes = results[0] || [];
// Count stats
const stats = {
total: nodes.length,
with_embeddings: nodes.filter((n: any) => n.embedding).length,
with_coords: nodes.filter((n: any) => n.coords_3d).length,
without_embeddings: nodes.filter((n: any) => !n.embedding).length,
without_coords: nodes.filter((n: any) => !n.coords_3d).length,
};
return NextResponse.json({
stats,
nodes: nodes.map((n: any) => ({
id: n.id,
title: n.title,
atp_uri: n.atp_uri,
has_embedding: !!n.embedding,
has_coords: !!n.coords_3d,
coords_3d: n.coords_3d,
})),
});
} catch (error) {
console.error('[Debug Nodes] Error:', error);
return NextResponse.json({ error: String(error) }, { status: 500 });
}
}
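Since this is a new debug endpoint, a quick way to exercise it from the browser console while logged in (the auth cookie rides along with `credentials: 'include'`):

const res = await fetch('/api/debug/nodes', { credentials: 'include' });
const { stats, nodes } = await res.json();
console.table(stats);             // totals: with/without embeddings and coords
console.table(nodes.slice(0, 5)); // first few cached nodes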

app/api/galaxy/route.ts (new file)

@@ -0,0 +1,105 @@
import { NextRequest, NextResponse } from 'next/server';
import { cookies } from 'next/headers';
import { connectToDB } from '@/lib/db';
import { verifySurrealJwt } from '@/lib/auth/jwt';
interface NodeData {
id: string;
title: string;
coords_3d: [number, number, number];
}
interface LinkData {
in: string;
out: string;
}
/**
* GET /api/galaxy
*
* Fetches nodes with 3D coordinates and their links for visualization.
* Automatically triggers graph calculation if needed.
*/
export async function GET(request: NextRequest) {
const cookieStore = await cookies();
const surrealJwt = cookieStore.get('ponderants-auth')?.value;
if (!surrealJwt) {
return NextResponse.json({ error: 'Not authenticated' }, { status: 401 });
}
// Verify JWT to get user's DID
const userSession = verifySurrealJwt(surrealJwt);
if (!userSession) {
return NextResponse.json({ error: 'Invalid auth token' }, { status: 401 });
}
const { did: userDid } = userSession;
try {
const db = await connectToDB();
// Fetch nodes that have 3D coordinates
const nodesQuery = `
SELECT id, title, coords_3d
FROM node
WHERE user_did = $userDid AND coords_3d != NONE
`;
const nodeResults = await db.query<[NodeData[]]>(nodesQuery, { userDid });
const nodes = nodeResults[0] || [];
// Fetch links between nodes
const linksQuery = `
SELECT in, out
FROM links_to
`;
const linkResults = await db.query<[LinkData[]]>(linksQuery);
const links = linkResults[0] || [];
// If we have nodes but no coordinates, check if we should calculate
if (nodes.length === 0) {
// Check if we have nodes with embeddings but no coordinates
const unmappedQuery = `
SELECT count() as count
FROM node
WHERE user_did = $userDid AND embedding != NONE AND coords_3d = NONE
GROUP ALL
`;
const unmappedResults = await db.query<[Array<{ count: number }>]>(unmappedQuery, { userDid });
const unmappedCount = unmappedResults[0]?.[0]?.count || 0;
if (unmappedCount >= 3) {
console.log(`[Galaxy API] Found ${unmappedCount} unmapped nodes, triggering calculation...`);
// Trigger graph calculation (don't await, return current state)
fetch(`${process.env.NEXT_PUBLIC_BASE_URL || 'http://localhost:3000'}/api/calculate-graph`, {
method: 'POST',
headers: {
'Cookie': `ponderants-auth=${surrealJwt}`,
},
}).catch((err) => {
console.error('[Galaxy API] Failed to trigger graph calculation:', err);
});
return NextResponse.json({
nodes: [],
links: [],
message: 'Calculating 3D coordinates... Refresh in a moment.',
});
}
}
console.log(`[Galaxy API] Returning ${nodes.length} nodes and ${links.length} links`);
return NextResponse.json({
nodes,
links,
});
} catch (error) {
console.error('[Galaxy API] Error:', error);
return NextResponse.json(
{ error: 'Failed to fetch galaxy data' },
{ status: 500 }
);
}
}
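When the route answers with the "Calculating 3D coordinates..." message, it has only fired off `/api/calculate-graph` in the background, so the client is expected to ask again. A small polling sketch a caller might use (interval and retry count are arbitrary):

interface GalaxyResponse {
  nodes: Array<{ id: string; title: string; coords_3d: [number, number, number] }>;
  links: Array<{ in: string; out: string }>;
  message?: string;
}

export async function fetchGalaxy(maxAttempts = 10): Promise<GalaxyResponse> {
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const res = await fetch('/api/galaxy', { credentials: 'include' });
    if (!res.ok) throw new Error(`Galaxy request failed: ${res.status}`);

    const data: GalaxyResponse = await res.json();
    // A `message` with no nodes means UMAP is still running server-side.
    if (data.nodes.length > 0 || !data.message) return data;

    await new Promise((resolve) => setTimeout(resolve, 3000));
  }
  throw new Error('Timed out waiting for 3D coordinates');
}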


@@ -0,0 +1,114 @@
/**
* API Route: Generate Node Draft
*
* Takes a conversation history and uses AI to generate a structured node draft
* with title and content that captures the key insights from the conversation.
*/
import { google } from '@ai-sdk/google';
import { generateText } from 'ai';
import { NextRequest, NextResponse } from 'next/server';
const model = google('gemini-2.0-flash-exp');
export async function POST(request: NextRequest) {
try {
const { messages } = await request.json();
if (!Array.isArray(messages) || messages.length === 0) {
return NextResponse.json(
{ error: 'Invalid or empty conversation' },
{ status: 400 }
);
}
// Format conversation for the AI
const conversationText = messages
.map((m: any) => {
const role = m.role === 'user' ? 'User' : 'AI';
let content = '';
if ('parts' in m && Array.isArray(m.parts)) {
const textParts = m.parts.filter((p: any) => p.type === 'text');
content = textParts.map((p: any) => p.text).join('\n');
} else if (m.content) {
content = m.content;
}
return `${role}: ${content}`;
})
.join('\n\n');
// Generate node draft using AI
const result = await generateText({
model,
prompt: `You are helping a user capture their thoughts as a structured "Node" - a mini blog post.
Analyze the following conversation and create a Node draft that:
1. Captures the core insight or topic discussed
2. Structures the content coherently
3. Preserves the user's voice and key ideas
4. Focuses on the most important takeaways
Conversation:
${conversationText}
Respond with a JSON object containing:
- title: A concise, compelling title (3-8 words)
- content: The main body in markdown format (200-500 words, use headings/lists where appropriate)
Format your response as valid JSON only, no additional text.`,
});
// Parse the AI response
let draft;
try {
draft = JSON.parse(result.text);
} catch (e) {
// If JSON parsing fails, try to extract from markdown code block
const jsonMatch = result.text.match(/```json\s*([\s\S]*?)\s*```/);
if (jsonMatch) {
draft = JSON.parse(jsonMatch[1]);
} else {
throw new Error('Failed to parse AI response as JSON');
}
}
// Validate the draft structure
if (!draft.title || !draft.content) {
throw new Error('Generated draft missing required fields');
}
// Add conversation context (last 3 messages for reference)
const contextMessages = messages.slice(-3);
const conversationContext = contextMessages
.map((m: any) => {
const role = m.role === 'user' ? 'User' : 'AI';
let content = '';
if ('parts' in m && Array.isArray(m.parts)) {
const textParts = m.parts.filter((p: any) => p.type === 'text');
content = textParts.map((p: any) => p.text).join('\n');
} else if (m.content) {
content = m.content;
}
return `${role}: ${content}`;
})
.join('\n\n');
return NextResponse.json({
draft: {
title: draft.title,
content: draft.content,
conversationContext,
},
});
} catch (error) {
console.error('[Generate Node Draft] Error:', error);
return NextResponse.json(
{ error: error instanceof Error ? error.message : 'Failed to generate node draft' },
{ status: 500 }
);
}
}


@@ -1,27 +1,35 @@
import { NextRequest, NextResponse } from 'next/server';
import { cookies } from 'next/headers';
import { AtpAgent, RichText } from '@atproto/api';
import { RichText, Agent } from '@atproto/api';
import { connectToDB } from '@/lib/db';
import { generateEmbedding } from '@/lib/ai';
import { verifySurrealJwt } from '@/lib/auth/jwt';
import { getOAuthClient } from '@/lib/auth/oauth-client';
export async function POST(request: NextRequest) {
const cookieStore = await cookies();
const surrealJwt = cookieStore.get('ponderants-auth')?.value;
const atpAccessToken = cookieStore.get('atproto_access_token')?.value;
if (!surrealJwt || !atpAccessToken) {
console.log('[POST /api/nodes] Auth check:', {
hasSurrealJwt: !!surrealJwt,
});
if (!surrealJwt) {
console.error('[POST /api/nodes] Missing auth cookie');
return NextResponse.json({ error: 'Not authenticated' }, { status: 401 });
}
// Verify the JWT and extract user info
const userSession = verifySurrealJwt(surrealJwt);
if (!userSession) {
console.error('[POST /api/nodes] Invalid JWT');
return NextResponse.json({ error: 'Invalid auth token' }, { status: 401 });
}
const { did: userDid } = userSession;
console.log('[POST /api/nodes] Verified user DID:', userDid);
const { title, body, links } = (await request.json()) as {
title: string;
body: string;
@@ -39,67 +47,95 @@ export async function POST(request: NextRequest) {
let atp_cid: string;
try {
// Get the PDS URL from environment or use default
const pdsUrl = process.env.BLUESKY_PDS_URL || 'https://bsky.social';
const agent = new AtpAgent({ service: pdsUrl });
// Get the OAuth client and restore the user's session
const client = await getOAuthClient();
console.log('[POST /api/nodes] Got OAuth client, attempting to restore session for DID:', userDid);
// Resume the session with the access token
await agent.resumeSession({
accessJwt: atpAccessToken,
refreshJwt: '', // We don't need refresh for this operation
did: userDid,
handle: userSession.handle,
});
// Restore the session - returns an OAuthSession object directly
const session = await client.restore(userDid);
// Format the body as RichText to detect links, mentions, etc.
const rt = new RichText({ text: body });
// Create an Agent from the session
const agent = new Agent(session);
console.log('[POST /api/nodes] Successfully restored OAuth session and created agent');
// Bluesky posts are limited to 300 graphemes
// Format a concise post with title and truncated body
const maxLength = 280; // Leave room for ellipsis
const fullText = `${title}\n\n${body}`;
let postText: string;
if (fullText.length <= maxLength) {
postText = fullText;
} else {
// Truncate at word boundary
const truncated = fullText.substring(0, maxLength);
const lastSpace = truncated.lastIndexOf(' ');
postText = truncated.substring(0, lastSpace > 0 ? lastSpace : maxLength) + '...';
}
// Format the text as RichText to detect links, mentions, etc.
const rt = new RichText({ text: postText });
await rt.detectFacets(agent);
// Create the ATproto record
// Create the ATproto record using standard Bluesky post collection
// This works with OAuth scope 'atproto' without requiring granular permissions
const response = await agent.api.com.atproto.repo.createRecord({
repo: userDid,
collection: 'com.ponderants.node',
collection: 'app.bsky.feed.post',
record: {
$type: 'com.ponderants.node',
title,
body: rt.text,
$type: 'app.bsky.feed.post',
text: rt.text,
facets: rt.facets,
links: links || [],
createdAt,
// Add a tag to identify this as a Ponderants node
tags: ['ponderants-node'],
},
});
atp_uri = response.uri;
atp_cid = response.cid;
atp_uri = response.data.uri;
atp_cid = response.data.cid;
console.log('[POST /api/nodes] ✓ Published to ATproto PDS as standard post:', atp_uri);
} catch (error) {
console.error('ATproto write error:', error);
console.error('[POST /api/nodes] ATproto write error:', error);
return NextResponse.json({ error: 'Failed to publish to PDS' }, { status: 500 });
}
// --- Step 2: Generate AI Embedding (Cache) ---
let embedding: number[];
// Embeddings are optional - used for vector search and 3D visualization
let embedding: number[] | undefined;
try {
embedding = await generateEmbedding(title + '\n' + body);
console.log('[POST /api/nodes] ✓ Generated embedding vector');
} catch (error) {
console.error('Embedding error:', error);
return NextResponse.json({ error: 'Failed to generate embedding' }, { status: 500 });
console.warn('[POST /api/nodes] ⚠ Embedding generation failed (non-critical):', error);
// Continue without embedding - it's only needed for advanced features
embedding = undefined;
}
// --- Step 3: Write to App View Cache (SurrealDB) ---
// The cache is optional - the ATproto PDS is the source of truth
try {
const db = await connectToDB(surrealJwt);
const db = await connectToDB();
// Create the node record in our cache.
// The `user_did` field is set, satisfying the 'PERMISSIONS'
// clause defined in the schema.
const newNode = await db.create('node', {
const nodeData: any = {
user_did: userDid,
atp_uri: atp_uri,
title: title,
body: body, // Store the raw text body
embedding: embedding,
// coords_3d will be calculated later by UMAP
});
};
// Only include embedding if it was successfully generated
if (embedding) {
nodeData.embedding = embedding;
}
const newNode = await db.create('node', nodeData);
// Handle linking
if (links && links.length > 0) {
@@ -120,11 +156,16 @@ export async function POST(request: NextRequest) {
}
}
return NextResponse.json(newNode);
console.log('[POST /api/nodes] ✓ Cached node in SurrealDB');
return NextResponse.json({ success: true, atp_uri, node: newNode });
} catch (error) {
console.error('SurrealDB write error:', error);
// TODO: Implement rollback for the ATproto post?
// This is a known limitation of the write-through cache pattern.
return NextResponse.json({ error: 'Failed to save to app cache' }, { status: 500 });
console.warn('[POST /api/nodes] ⚠ SurrealDB cache write failed (non-critical):', error);
// The node was successfully published to ATproto (source of truth)
// Cache failure is non-critical - advanced features may be unavailable
return NextResponse.json({
success: true,
atp_uri,
warning: 'Node published to Bluesky, but cache update failed. Advanced features may be unavailable.',
});
}
}
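Because the SurrealDB cache write is now non-fatal, callers of POST /api/nodes should treat a `warning` in the response as advisory rather than as an error. A sketch of client-side handling, based on the request and response shapes in the route above (the notification wording is illustrative):

async function publishNode(title: string, body: string, links: string[] = []) {
  const res = await fetch('/api/nodes', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    credentials: 'include',
    body: JSON.stringify({ title, body, links }),
  });

  if (!res.ok) {
    const { error } = await res.json();
    throw new Error(error ?? 'Failed to publish node');
  }

  const data = await res.json();
  if (data.warning) {
    // The node reached the PDS (source of truth); only the cache write failed.
    console.warn(data.warning);
  }
  return data.atp_uri as string;
}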


@@ -2,6 +2,7 @@ import { NextRequest, NextResponse } from 'next/server';
import { cookies } from 'next/headers';
import { connectToDB } from '@/lib/db';
import { generateEmbedding } from '@/lib/ai';
import { verifySurrealJwt } from '@/lib/auth/jwt';
/**
* POST /api/suggest-links
@@ -18,6 +19,14 @@ export async function POST(request: NextRequest) {
return NextResponse.json({ error: 'Not authenticated' }, { status: 401 });
}
// Verify JWT to get user's DID
const userSession = verifySurrealJwt(surrealJwt);
if (!userSession) {
return NextResponse.json({ error: 'Invalid auth token' }, { status: 401 });
}
const { did: userDid } = userSession;
const { body } = (await request.json()) as { body: string };
if (!body) {
@@ -28,15 +37,13 @@ export async function POST(request: NextRequest) {
// 1. Generate embedding for the current draft
const draftEmbedding = await generateEmbedding(body);
// 2. Connect to DB (as the user)
// This enforces row-level security - user can only search their own nodes
const db = await connectToDB(surrealJwt);
// 2. Connect to DB with root credentials
const db = await connectToDB();
// 3. Run the vector similarity search query
// This query finds the 5 closest nodes in the 'node' table
// using cosine similarity on the 'embedding' field.
// It only searches nodes WHERE user_did = $token.did,
// which is enforced by the table's PERMISSIONS.
// We filter by user_did to ensure users only see their own nodes.
const query = `
SELECT
id,
@@ -45,6 +52,7 @@ export async function POST(request: NextRequest) {
atp_uri,
vector::similarity::cosine(embedding, $draft_embedding) AS score
FROM node
WHERE user_did = $user_did
ORDER BY score DESC
LIMIT 5;
`;
@@ -57,6 +65,7 @@ export async function POST(request: NextRequest) {
score: number;
}>]>(query, {
draft_embedding: draftEmbedding,
user_did: userDid,
});
// The query returns an array of result sets. We want the first one.

app/api/tts/route.ts (new file)

@@ -0,0 +1,83 @@
import { NextRequest, NextResponse } from 'next/server';
import { createClient } from '@deepgram/sdk';
/**
* Text-to-Speech API route using Deepgram Aura
*
* Converts text to natural-sounding speech using Deepgram's Aura-2 model.
* Returns audio data that can be played in the browser.
*/
export async function POST(request: NextRequest) {
const deepgramApiKey = process.env.DEEPGRAM_API_KEY;
if (!deepgramApiKey) {
return NextResponse.json(
{ error: 'Deepgram API key not configured' },
{ status: 500 }
);
}
try {
const { text } = await request.json();
if (!text || typeof text !== 'string') {
return NextResponse.json(
{ error: 'Text parameter is required' },
{ status: 400 }
);
}
console.log('[TTS] Generating speech for text:', text.substring(0, 50) + '...');
const deepgram = createClient(deepgramApiKey);
// Generate speech using Deepgram Aura
const response = await deepgram.speak.request(
{ text },
{
model: 'aura-2-thalia-en', // Natural female voice
encoding: 'linear16',
container: 'wav',
}
);
// Get the audio stream
const stream = await response.getStream();
if (!stream) {
throw new Error('No audio stream returned from Deepgram');
}
// Convert stream to buffer
const chunks: Uint8Array[] = [];
const reader = stream.getReader();
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
if (value) chunks.push(value);
}
} finally {
reader.releaseLock();
}
const buffer = Buffer.concat(chunks);
console.log('[TTS] ✓ Generated', buffer.length, 'bytes of audio');
// Return audio with proper headers
return new NextResponse(buffer, {
headers: {
'Content-Type': 'audio/wav',
'Content-Length': buffer.length.toString(),
},
});
} catch (error) {
console.error('[TTS] Error generating speech:', error);
return NextResponse.json(
{ error: 'Failed to generate speech' },
{ status: 500 }
);
}
}
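The route returns raw WAV bytes with an `audio/wav` content type, so playing it back client-side is just a blob URL and an `Audio` element, which is essentially what the voice-mode code later in this commit does. A minimal sketch:

export async function speak(text: string): Promise<void> {
  const res = await fetch('/api/tts', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ text }),
  });
  if (!res.ok) throw new Error('Failed to generate speech');

  const audioUrl = URL.createObjectURL(await res.blob());
  const audio = new Audio(audioUrl);
  // Release the object URL once playback finishes to avoid leaking memory.
  audio.onended = () => URL.revokeObjectURL(audioUrl);
  await audio.play();
}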


@@ -12,25 +12,116 @@ import {
Group,
Text,
Loader,
ActionIcon,
Tooltip,
} from '@mantine/core';
import { useRef, useState, useEffect } from 'react';
import { MicrophoneRecorder } from '@/components/MicrophoneRecorder';
import { useRef, useEffect, useState } from 'react';
import { IconVolume, IconMicrophone, IconNotes } from '@tabler/icons-react';
import { UserMenu } from '@/components/UserMenu';
import { useVoiceMode } from '@/hooks/useVoiceMode';
import { useAppMachine } from '@/hooks/useAppMachine';
import { notifications } from '@mantine/notifications';
import { useMediaQuery } from '@mantine/hooks';
/**
* Get the voice button text based on the current state
*/
function getVoiceButtonText(state: any): string {
if (state.matches('idle')) {
return 'Start Voice Conversation';
} else if (state.matches('checkingForGreeting')) {
return 'Checking for greeting...';
} else if (state.matches('listening')) {
return 'Listening... Start speaking';
} else if (state.matches('userSpeaking')) {
return 'Speaking... (will auto-submit after 3s silence)';
} else if (state.matches('timingOut')) {
return 'Speaking... (auto-submits soon)';
} else if (state.matches('submittingUser')) {
return 'Submitting...';
} else if (state.matches('waitingForAI')) {
return 'Waiting for AI...';
} else if (state.matches('generatingTTS')) {
return 'Generating speech...';
} else if (state.matches('playingTTS')) {
return 'AI is speaking...';
}
return 'Start Voice Conversation';
}
export default function ChatPage() {
const viewport = useRef<HTMLDivElement>(null);
const { messages, sendMessage, setMessages, status } = useChat();
const isMobile = useMediaQuery('(max-width: 768px)');
// Text input state (managed manually since useChat doesn't provide form helpers)
const [input, setInput] = useState('');
const { messages, sendMessage, setMessages, status } = useChat({
api: '/api/chat',
body: {
persona: 'Socratic',
// App machine for navigation
const appActor = useAppMachine();
// State for creating node
const [isCreatingNode, setIsCreatingNode] = useState(false);
// Use the clean voice mode hook
const { state, send, transcript, error } = useVoiceMode({
messages,
status,
onSubmit: (text: string) => {
sendMessage({ text });
},
credentials: 'include',
});
// Handler to create node from conversation
const handleCreateNode = async () => {
if (messages.length === 0) {
notifications.show({
title: 'No conversation',
message: 'Start a conversation before creating a node',
color: 'red',
});
return;
}
setIsCreatingNode(true);
try {
const response = await fetch('/api/generate-node-draft', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
credentials: 'include', // Include cookies for authentication
body: JSON.stringify({ messages }),
});
if (!response.ok) {
const errorData = await response.json();
throw new Error(errorData.error || 'Failed to generate node draft');
}
const { draft } = await response.json();
// Transition to edit mode with the draft
appActor.send({
type: 'CREATE_NODE_FROM_CONVERSATION',
draft,
});
notifications.show({
title: 'Node draft created',
message: 'Review and edit your node before publishing',
color: 'green',
});
} catch (error) {
console.error('[Create Node] Error:', error);
notifications.show({
title: 'Error',
message: error instanceof Error ? error.message : 'Failed to create node draft',
color: 'red',
});
} finally {
setIsCreatingNode(false);
}
};
// Add initial greeting message on first load
useEffect(() => {
if (messages.length === 0) {
@@ -44,7 +135,7 @@ export default function ChatPage() {
text: 'Welcome to Ponderants! I\'m here to help you explore and structure your ideas through conversation.\n\nWhat would you like to talk about today? I can adapt my interview style to best suit your needs (Socratic questioning, collaborative brainstorming, or other approaches).\n\nJust start sharing your thoughts, and we\'ll discover meaningful insights together.',
},
],
},
} as any,
]);
}
}, []);
@@ -57,16 +148,7 @@ export default function ChatPage() {
});
}, [messages]);
const handleSubmit = (e: React.FormEvent) => {
e.preventDefault();
if (!input.trim() || status === 'submitted' || status === 'streaming') return;
sendMessage({ text: input });
setInput('');
};
const handleNewConversation = () => {
// Clear all messages and reset to initial greeting
setMessages([
{
id: 'initial-greeting',
@@ -77,35 +159,65 @@ export default function ChatPage() {
text: 'Welcome to Ponderants! I\'m here to help you explore and structure your ideas through conversation.\n\nWhat would you like to talk about today? I can adapt my interview style to best suit your needs (Socratic questioning, collaborative brainstorming, or other approaches).\n\nJust start sharing your thoughts, and we\'ll discover meaningful insights together.',
},
],
},
} as any,
]);
};
return (
<Container size="md" h="100vh" style={{ display: 'flex', flexDirection: 'column' }}>
<Group justify="space-between" py="md">
<Title order={2}>
Ponderants Interview
</Title>
<Group gap="md">
<Tooltip label="Start a new conversation">
<Button
variant="subtle"
onClick={handleNewConversation}
disabled={status === 'submitted' || status === 'streaming'}
>
New Conversation
</Button>
</Tooltip>
<UserMenu />
</Group>
</Group>
const isVoiceActive = !state.matches('idle');
const canSkipAudio = state.hasTag('canSkipAudio');
<ScrollArea
h="100%"
style={{ flex: 1 }}
viewportRef={viewport}
return (
<Container size="md" style={{ paddingTop: '80px', paddingBottom: '300px', maxWidth: '100%' }}>
{/* Fixed Header */}
<Paper
withBorder
p="md"
radius={0}
style={{
position: 'fixed',
top: 0,
left: 0,
right: 0,
zIndex: 50,
borderBottom: '1px solid #dee2e6',
backgroundColor: '#1a1b1e',
}}
>
<Container size="md">
<Group justify="space-between">
<Title order={2}>Convo</Title>
{!isMobile && (
<Group gap="md">
<Tooltip label="Generate a node from this conversation">
<Button
variant="light"
color="blue"
leftSection={<IconNotes size={18} />}
onClick={handleCreateNode}
loading={isCreatingNode}
disabled={messages.length === 0 || status === 'submitted' || status === 'streaming'}
>
Create Node
</Button>
</Tooltip>
<Tooltip label="Start a new conversation">
<Button
variant="subtle"
onClick={handleNewConversation}
disabled={status === 'submitted' || status === 'streaming'}
>
New Conversation
</Button>
</Tooltip>
<UserMenu />
</Group>
)}
</Group>
</Container>
</Paper>
{/* Scrollable Messages Area */}
<ScrollArea h="calc(100vh - 380px)" viewportRef={viewport}>
<Stack gap="md" pb="xl">
{messages.map((m) => (
<Paper
@@ -116,117 +228,223 @@ export default function ChatPage() {
radius="lg"
style={{
alignSelf: m.role === 'user' ? 'flex-end' : 'flex-start',
backgroundColor:
m.role === 'user' ? '#343a40' : '#212529',
backgroundColor: m.role === 'user' ? '#343a40' : '#212529',
}}
w="80%"
>
<Text fw={700} size="sm">{m.role === 'user' ? 'You' : 'AI'}</Text>
{m.parts.map((part, i) => {
if (part.type === 'text') {
return (
<Text key={i} style={{ whiteSpace: 'pre-wrap' }}>
{part.text}
</Text>
);
<Text fw={700} size="sm">
{m.role === 'user' ? 'You' : 'AI'}
</Text>
{(() => {
if ('parts' in m && Array.isArray((m as any).parts)) {
return (m as any).parts.map((part: any, i: number) => {
if (part.type === 'text') {
return (
<Text key={i} style={{ whiteSpace: 'pre-wrap' }}>
{part.text}
</Text>
);
}
return null;
});
}
// Handle tool calls (e.g., suggest_node)
if (part.type === 'tool-call') {
return (
<Paper key={i} withBorder p="xs" mt="xs" bg="dark.6">
<Text size="xs" c="dimmed" mb="xs">
💡 Node Suggestion
</Text>
<Text fw={600}>{part.args.title}</Text>
<Text size="sm" mt="xs">
{part.args.content}
</Text>
{part.args.tags && part.args.tags.length > 0 && (
<Group gap="xs" mt="xs">
{part.args.tags.map((tag: string, tagIdx: number) => (
<Text key={tagIdx} size="xs" c="blue.4">
#{tag}
</Text>
))}
</Group>
)}
</Paper>
);
}
return null;
})}
return <Text>Message content unavailable</Text>;
})()}
</Paper>
))}
{/* Typing indicator while AI is generating a response */}
{/* Typing indicator */}
{(status === 'submitted' || status === 'streaming') && (
<Paper
withBorder
shadow="md"
p="sm"
radius="lg"
style={{
alignSelf: 'flex-start',
backgroundColor: '#212529',
}}
style={{ alignSelf: 'flex-start', backgroundColor: '#212529' }}
w="80%"
>
<Text fw={700} size="sm">AI</Text>
<Text fw={700} size="sm">
AI
</Text>
<Group gap="xs" mt="xs">
<Loader size="xs" />
<Text size="sm" c="dimmed">Thinking...</Text>
<Text size="sm" c="dimmed">
Thinking...
</Text>
</Group>
</Paper>
)}
{/* Show current transcript while speaking */}
{transcript && (state.matches('userSpeaking') || state.matches('timingOut')) && (
<Paper
withBorder
shadow="md"
p="sm"
radius="lg"
style={{ alignSelf: 'flex-end', backgroundColor: '#343a40' }}
w="80%"
>
<Text fw={700} size="sm">
You (speaking...)
</Text>
<Text style={{ whiteSpace: 'pre-wrap' }}>{transcript}</Text>
</Paper>
)}
</Stack>
</ScrollArea>
<form onSubmit={handleSubmit}>
<Paper withBorder p="sm" radius="xl" my="md">
<Group>
<TextInput
value={input}
onChange={(e) => setInput(e.currentTarget.value)}
placeholder="Speak or type your thoughts..."
style={{ flex: 1 }}
styles={{
input: {
paddingLeft: '1rem',
paddingRight: '0.5rem',
},
}}
variant="unstyled"
disabled={status === 'submitted' || status === 'streaming'}
/>
{/* Microphone Recorder */}
<MicrophoneRecorder
onTranscriptUpdate={(transcript) => {
setInput(transcript);
}}
onTranscriptFinalized={(transcript) => {
setInput(transcript);
setTimeout(() => {
const form = document.querySelector('form');
if (form) {
form.requestSubmit();
}
}, 100);
}}
/>
{/* Fixed Voice Mode Controls */}
<Paper
withBorder
p="md"
radius={0}
style={{
position: 'fixed',
bottom: 0,
left: 0,
right: 0,
zIndex: 50,
borderTop: '1px solid #dee2e6',
backgroundColor: '#1a1b1e',
}}
>
<Container size="md">
<Stack gap="sm">
<Group gap="sm">
{/* Main Voice Button */}
<Button
type="submit"
onClick={() => send({ type: isVoiceActive ? 'STOP_VOICE' : 'START_VOICE' })}
size="xl"
radius="xl"
loading={status === 'submitted' || status === 'streaming'}
h={80}
style={{ flex: 1 }}
color={
canSkipAudio
? 'blue'
: state.matches('userSpeaking') || state.matches('timingOut')
? 'green'
: state.matches('listening')
? 'yellow'
: state.matches('waitingForAI') || state.matches('submittingUser')
? 'blue'
: 'gray'
}
variant={isVoiceActive ? 'filled' : 'light'}
leftSection={
canSkipAudio ? (
<IconVolume size={32} />
) : state.matches('userSpeaking') ||
state.matches('timingOut') ||
state.matches('listening') ? (
<IconMicrophone size={32} />
) : (
<IconMicrophone size={32} />
)
}
disabled={status === 'submitted' || status === 'streaming'}
>
Send
{getVoiceButtonText(state)}
</Button>
{/* Skip Button */}
{canSkipAudio && (
<Button
onClick={() => send({ type: 'SKIP_AUDIO' })}
size="xl"
radius="xl"
h={80}
color="gray"
variant="outline"
>
Skip
</Button>
)}
</Group>
</Paper>
</form>
{/* Development Test Controls */}
{process.env.NODE_ENV === 'development' && (
<Paper withBorder p="sm" radius="md" style={{ backgroundColor: '#1a1b1e' }}>
<Stack gap="xs">
<Text size="xs" fw={700} c="dimmed">
DEV: State Machine Testing
</Text>
<Text size="xs" c="dimmed">
State: {JSON.stringify(state.value)} | Tags: {Array.from(state.tags).join(', ')}
</Text>
<Group gap="xs">
<Button
size="xs"
onClick={() => send({ type: 'START_LISTENING' })}
disabled={!state.matches('checkingForGreeting')}
>
Force Listen
</Button>
<Button
size="xs"
onClick={() => send({ type: 'USER_STARTED_SPEAKING' })}
disabled={!state.matches('listening')}
>
Simulate Speech
</Button>
<Button
size="xs"
onClick={() => send({ type: 'FINALIZED_PHRASE', phrase: 'Test message' })}
disabled={!state.matches('userSpeaking') && !state.matches('listening')}
>
Add Phrase
</Button>
<Button
size="xs"
onClick={() => send({ type: 'SILENCE_TIMEOUT' })}
disabled={!state.matches('timingOut')}
>
Trigger Timeout
</Button>
</Group>
</Stack>
</Paper>
)}
{/* Text Input */}
<form
onSubmit={(e) => {
e.preventDefault();
if (input.trim() && !isVoiceActive) {
sendMessage({ text: input });
setInput('');
}
}}
>
<Group>
<TextInput
value={input}
onChange={(e) => setInput(e.currentTarget.value)}
placeholder="Or type your thoughts here..."
style={{ flex: 1 }}
variant="filled"
disabled={isVoiceActive}
/>
<Button
type="submit"
radius="xl"
loading={status === 'submitted' || status === 'streaming'}
disabled={!input.trim() || isVoiceActive}
>
Send
</Button>
</Group>
</form>
{/* Error Display */}
{error && (
<Text size="sm" c="red">
Error: {error}
</Text>
)}
</Stack>
</Container>
</Paper>
</Container>
);
}
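The chat page now reads its voice-mode status from an XState machine via `useVoiceMode`. The machine itself is not part of this diff, but the states and events the UI references (`listening`, `userSpeaking`, `timingOut`, `SILENCE_TIMEOUT`, `SKIP_AUDIO`, the `canSkipAudio` tag, and so on) imply roughly the following shape. A hypothetical sketch, not the actual `lib/app-machine` implementation; event names not seen in the diff are marked as assumptions:

import { createMachine } from 'xstate';

export const voiceMachine = createMachine({
  id: 'voice',
  initial: 'idle',
  states: {
    idle: { on: { START_VOICE: 'checkingForGreeting' } },
    checkingForGreeting: { on: { START_LISTENING: 'listening', STOP_VOICE: 'idle' } },
    listening: { on: { USER_STARTED_SPEAKING: 'userSpeaking', STOP_VOICE: 'idle' } },
    userSpeaking: { on: { FINALIZED_PHRASE: 'timingOut', STOP_VOICE: 'idle' } },
    timingOut: {
      on: {
        USER_STARTED_SPEAKING: 'userSpeaking', // more speech cancels the countdown
        SILENCE_TIMEOUT: 'submittingUser',
        STOP_VOICE: 'idle',
      },
    },
    submittingUser: { on: { SUBMITTED: 'waitingForAI', STOP_VOICE: 'idle' } }, // event name assumed
    waitingForAI: { on: { AI_RESPONSE_READY: 'generatingTTS', STOP_VOICE: 'idle' } },
    generatingTTS: {
      tags: ['canSkipAudio'],
      on: { TTS_PLAYING: 'playingTTS', SKIP_AUDIO: 'listening', STOP_VOICE: 'idle' },
    },
    playingTTS: {
      tags: ['canSkipAudio'],
      on: { TTS_FINISHED: 'listening', SKIP_AUDIO: 'listening', STOP_VOICE: 'idle' },
    },
  },
});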

app/chat/page.tsx.backup (new file)

@@ -0,0 +1,664 @@
'use client';
import { useChat } from '@ai-sdk/react';
import {
Stack,
TextInput,
Button,
Paper,
ScrollArea,
Title,
Container,
Group,
Text,
Loader,
ActionIcon,
Tooltip,
} from '@mantine/core';
import { useRef, useState, useEffect, useCallback } from 'react';
import { IconVolume, IconMicrophone, IconMicrophoneOff } from '@tabler/icons-react';
import { UserMenu } from '@/components/UserMenu';
// Define the shape of the Deepgram transcript
interface DeepgramTranscript {
channel: {
alternatives: Array<{
transcript: string;
}>;
};
is_final: boolean;
speech_final: boolean;
}
type VoiceState = 'idle' | 'listening' | 'user-speaking' | 'processing' | 'ai-speaking';
export default function ChatPage() {
const viewport = useRef<HTMLDivElement>(null);
const [input, setInput] = useState('');
const [voiceState, setVoiceState] = useState<VoiceState>('idle');
const [countdown, setCountdown] = useState<number>(3);
const [isGeneratingSpeech, setIsGeneratingSpeech] = useState(false);
const lastSpokenMessageId = useRef<string | null>(null);
const audioRef = useRef<HTMLAudioElement | null>(null);
const mediaRecorderRef = useRef<MediaRecorder | null>(null);
const socketRef = useRef<WebSocket | null>(null);
const transcriptRef = useRef<string>('');
const silenceTimeoutRef = useRef<NodeJS.Timeout | null>(null);
const silenceStartTimeRef = useRef<number | null>(null);
const countdownIntervalRef = useRef<NodeJS.Timeout | null>(null);
const hasStartedSpeakingRef = useRef(false);
const { messages, sendMessage, setMessages, status } = useChat({
api: '/api/chat',
body: {
persona: 'Socratic',
},
credentials: 'include',
});
// Handle AI response in voice conversation mode
useEffect(() => {
if (voiceState !== 'processing') return;
console.log('[Voice Mode] Effect running - voiceState: processing, status:', status, 'messages:', messages.length);
// Wait until the AI response is complete (status returns to 'ready')
if (status !== 'ready') {
console.log('[Voice Mode] Waiting for status to be ready, current:', status);
return;
}
// Find the latest assistant message
console.log('[Voice Mode] All messages:', messages.map(m => ({ role: m.role, id: m.id, preview: m.parts[0]?.text?.substring(0, 30) })));
const lastAssistantMessage = [...messages]
.reverse()
.find((m) => m.role === 'assistant');
if (!lastAssistantMessage) {
console.log('[Voice Mode] No assistant message found');
return;
}
console.log('[Voice Mode] Selected message ID:', lastAssistantMessage.id);
console.log('[Voice Mode] Selected message text preview:', lastAssistantMessage.parts.find(p => p.type === 'text')?.text?.substring(0, 50));
console.log('[Voice Mode] Last spoken message ID:', lastSpokenMessageId.current);
// Skip if we've already spoken this message
if (lastSpokenMessageId.current === lastAssistantMessage.id) {
console.log('[Voice Mode] Already spoke this message, skipping');
return;
}
// Extract text from the message
const textPart = lastAssistantMessage.parts.find((p) => p.type === 'text');
if (!textPart || !textPart.text) {
console.log('[Voice Mode] No text part found in message');
return;
}
// Play the audio and transition to ai-speaking state
console.log('[Voice Mode] Transitioning to ai-speaking, will play audio');
setVoiceState('ai-speaking');
playAudio(textPart.text, lastAssistantMessage.id);
}, [messages, voiceState, status]);
const playAudio = async (text: string, messageId: string) => {
try {
console.log('[Voice Mode] Generating speech for message:', messageId);
setIsGeneratingSpeech(true);
const response = await fetch('/api/tts', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ text }),
});
if (!response.ok) {
throw new Error('Failed to generate speech');
}
const audioBlob = await response.blob();
const audioUrl = URL.createObjectURL(audioBlob);
// Create or reuse audio element
if (!audioRef.current) {
audioRef.current = new Audio();
}
audioRef.current.src = audioUrl;
audioRef.current.onended = () => {
URL.revokeObjectURL(audioUrl);
console.log('[Voice Mode] ✓ Finished playing audio, starting new listening session');
lastSpokenMessageId.current = messageId;
setIsGeneratingSpeech(false);
// After AI finishes speaking, go back to listening for user
startListening();
};
audioRef.current.onerror = () => {
URL.revokeObjectURL(audioUrl);
console.error('[Voice Mode] Error playing audio');
setIsGeneratingSpeech(false);
// On error, also go back to listening
startListening();
};
await audioRef.current.play();
console.log('[Voice Mode] ✓ Playing audio');
setIsGeneratingSpeech(false); // Audio is now playing
} catch (error) {
console.error('[Voice Mode] Error:', error);
setIsGeneratingSpeech(false);
// On error, go back to listening
startListening();
}
};
const submitUserInput = useCallback(() => {
// Clear any pending silence timeout and countdown
if (silenceTimeoutRef.current) {
clearTimeout(silenceTimeoutRef.current);
silenceTimeoutRef.current = null;
}
if (countdownIntervalRef.current) {
clearInterval(countdownIntervalRef.current);
countdownIntervalRef.current = null;
}
silenceStartTimeRef.current = null;
setCountdown(3);
// Stop recording
if (mediaRecorderRef.current) {
mediaRecorderRef.current.stop();
mediaRecorderRef.current = null;
}
if (socketRef.current) {
socketRef.current.close();
socketRef.current = null;
}
// Reset speaking flag
hasStartedSpeakingRef.current = false;
// Send the transcript as a message if we have one
if (transcriptRef.current.trim()) {
console.log('[Voice Mode] Submitting transcript:', transcriptRef.current);
setInput(transcriptRef.current);
setVoiceState('processing');
setTimeout(() => {
const form = document.querySelector('form');
if (form) {
console.log('[Voice Mode] Form found, submitting...');
form.requestSubmit();
} else {
console.error('[Voice Mode] Form not found!');
}
}, 100);
} else {
// If no transcript, go back to listening
console.log('[Voice Mode] No transcript to submit, going back to listening');
startListening();
}
transcriptRef.current = '';
}, []);
const startListening = useCallback(async () => {
transcriptRef.current = '';
setInput('');
hasStartedSpeakingRef.current = false;
// DON'T reset lastSpokenMessageId here - we need it to track what we've already spoken
silenceStartTimeRef.current = null;
setCountdown(3);
setVoiceState('listening');
try {
// 1. Get the Deepgram API key
const response = await fetch('/api/voice-token', { method: 'POST' });
const data = await response.json();
if (data.error) {
throw new Error(data.error);
}
const { key } = data;
// 2. Access the microphone
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
// 3. Open direct WebSocket to Deepgram with voice activity detection
const socket = new WebSocket(
'wss://api.deepgram.com/v1/listen?interim_results=true&punctuate=true&vad_events=true',
['token', key]
);
socketRef.current = socket;
socket.onopen = () => {
console.log('[Voice Mode] ✓ WebSocket connected, listening for speech...');
// 4. Create MediaRecorder
const mediaRecorder = new MediaRecorder(stream, {
mimeType: 'audio/webm',
});
mediaRecorderRef.current = mediaRecorder;
// 5. Send audio chunks on data available
mediaRecorder.ondataavailable = (event) => {
if (event.data.size > 0 && socket.readyState === WebSocket.OPEN) {
socket.send(event.data);
}
};
// Start recording and chunking audio every 250ms
mediaRecorder.start(250);
};
// 6. Receive transcripts and handle silence detection
socket.onmessage = (event) => {
const data = JSON.parse(event.data) as DeepgramTranscript;
// Check if this message has alternatives (some Deepgram messages don't)
if (!data.channel?.alternatives) {
return; // Skip non-transcript messages (metadata, VAD events, etc.)
}
const transcript = data.channel.alternatives[0]?.transcript || '';
if (transcript) {
// User has started speaking
if (!hasStartedSpeakingRef.current) {
console.log('[Voice Mode] User started speaking');
hasStartedSpeakingRef.current = true;
setVoiceState('user-speaking');
}
// Clear any existing silence timeout and countdown
if (silenceTimeoutRef.current) {
clearTimeout(silenceTimeoutRef.current);
silenceTimeoutRef.current = null;
}
if (countdownIntervalRef.current) {
clearInterval(countdownIntervalRef.current);
countdownIntervalRef.current = null;
}
silenceStartTimeRef.current = null;
setCountdown(3);
// Handle transcript updates
if (data.is_final) {
// This is a finalized phrase - append it to our transcript
transcriptRef.current = transcriptRef.current
? transcriptRef.current + ' ' + transcript
: transcript;
setInput(transcriptRef.current);
console.log('[Voice Mode] Finalized phrase:', transcript);
// Start a generous 3-second silence timer after each finalized phrase
silenceStartTimeRef.current = Date.now();
// Update countdown every 100ms
countdownIntervalRef.current = setInterval(() => {
if (silenceStartTimeRef.current) {
const elapsed = Date.now() - silenceStartTimeRef.current;
const remaining = Math.max(0, 3 - elapsed / 1000);
setCountdown(remaining);
}
}, 100);
silenceTimeoutRef.current = setTimeout(() => {
console.log('[Voice Mode] 3 seconds of silence detected, submitting...');
submitUserInput();
}, 3000);
} else {
// This is an interim result - show it temporarily
const displayText = transcriptRef.current
? transcriptRef.current + ' ' + transcript
: transcript;
setInput(displayText);
}
}
};
socket.onclose = () => {
// Clean up stream
stream.getTracks().forEach((track) => track.stop());
console.log('[Voice Mode] WebSocket closed');
};
socket.onerror = (err) => {
console.error('[Voice Mode] WebSocket error:', err);
setVoiceState('idle');
};
} catch (error) {
console.error('[Voice Mode] Error starting listening:', error);
setVoiceState('idle');
}
}, [submitUserInput]);
const skipAudioAndListen = useCallback(() => {
console.log('[Voice Mode] Skipping audio playback');
// Stop current audio
if (audioRef.current) {
audioRef.current.pause();
audioRef.current.currentTime = 0;
}
setIsGeneratingSpeech(false);
// Go straight to listening
startListening();
}, [startListening]);
const exitVoiceMode = useCallback(() => {
// Clear any timeouts and intervals
if (silenceTimeoutRef.current) {
clearTimeout(silenceTimeoutRef.current);
silenceTimeoutRef.current = null;
}
if (countdownIntervalRef.current) {
clearInterval(countdownIntervalRef.current);
countdownIntervalRef.current = null;
}
silenceStartTimeRef.current = null;
// Stop recording
if (mediaRecorderRef.current) {
mediaRecorderRef.current.stop();
mediaRecorderRef.current = null;
}
if (socketRef.current) {
socketRef.current.close();
socketRef.current = null;
}
// Stop audio playback
if (audioRef.current) {
audioRef.current.pause();
audioRef.current = null;
}
hasStartedSpeakingRef.current = false;
lastSpokenMessageId.current = null;
transcriptRef.current = '';
setInput('');
setCountdown(3);
setIsGeneratingSpeech(false);
setVoiceState('idle');
console.log('[Voice Mode] Exited voice conversation mode');
}, []);
const handleToggleVoiceMode = useCallback(() => {
if (voiceState === 'idle') {
// Start voice conversation mode
// First, check if there's a recent AI message to read out
const lastAssistantMessage = [...messages]
.reverse()
.find((m) => m.role === 'assistant');
if (lastAssistantMessage) {
// Extract text from the message
const textPart = lastAssistantMessage.parts.find((p) => p.type === 'text');
if (textPart && textPart.text) {
// Play the most recent AI message first, then start listening
console.log('[Voice Mode] Starting voice mode, reading most recent AI message first');
setVoiceState('ai-speaking');
playAudio(textPart.text, lastAssistantMessage.id);
return;
}
}
// No AI message to read, just start listening
startListening();
} else {
// Exit voice conversation mode
exitVoiceMode();
}
}, [voiceState, startListening, exitVoiceMode, messages]);
// Add initial greeting message on first load
useEffect(() => {
if (messages.length === 0) {
setMessages([
{
id: 'initial-greeting',
role: 'assistant',
parts: [
{
type: 'text',
text: 'Welcome to Ponderants! I\'m here to help you explore and structure your ideas through conversation.\n\nWhat would you like to talk about today? I can adapt my interview style to best suit your needs (Socratic questioning, collaborative brainstorming, or other approaches).\n\nJust start sharing your thoughts, and we\'ll discover meaningful insights together.',
},
],
},
]);
}
}, []);
// Auto-scroll to bottom
useEffect(() => {
viewport.current?.scrollTo({
top: viewport.current.scrollHeight,
behavior: 'smooth',
});
}, [messages]);
const handleSubmit = (e: React.FormEvent) => {
e.preventDefault();
if (!input.trim() || status === 'submitted' || status === 'streaming') return;
sendMessage({ text: input });
setInput('');
};
const handleNewConversation = () => {
// Clear all messages and reset to initial greeting
setMessages([
{
id: 'initial-greeting',
role: 'assistant',
parts: [
{
type: 'text',
text: 'Welcome to Ponderants! I\'m here to help you explore and structure your ideas through conversation.\n\nWhat would you like to talk about today? I can adapt my interview style to best suit your needs (Socratic questioning, collaborative brainstorming, or other approaches).\n\nJust start sharing your thoughts, and we\'ll discover meaningful insights together.',
},
],
},
]);
};
return (
<Container size="md" h="100vh" style={{ display: 'flex', flexDirection: 'column' }}>
<Group justify="space-between" py="md">
<Title order={2}>
Ponderants Interview
</Title>
<Group gap="md">
<Tooltip label="Start a new conversation">
<Button
variant="subtle"
onClick={handleNewConversation}
disabled={status === 'submitted' || status === 'streaming'}
>
New Conversation
</Button>
</Tooltip>
<UserMenu />
</Group>
</Group>
<ScrollArea
h="100%"
style={{ flex: 1 }}
viewportRef={viewport}
>
<Stack gap="md" pb="xl">
{messages.map((m) => (
<Paper
key={m.id}
withBorder
shadow="md"
p="sm"
radius="lg"
style={{
alignSelf: m.role === 'user' ? 'flex-end' : 'flex-start',
backgroundColor:
m.role === 'user' ? '#343a40' : '#212529',
}}
w="80%"
>
<Text fw={700} size="sm">{m.role === 'user' ? 'You' : 'AI'}</Text>
{m.parts.map((part, i) => {
if (part.type === 'text') {
return (
<Text key={i} style={{ whiteSpace: 'pre-wrap' }}>
{part.text}
</Text>
);
}
// Handle tool calls (e.g., suggest_node)
if (part.type === 'tool-call') {
return (
<Paper key={i} withBorder p="xs" mt="xs" bg="dark.6">
<Text size="xs" c="dimmed" mb="xs">
💡 Node Suggestion
</Text>
<Text fw={600}>{part.args.title}</Text>
<Text size="sm" mt="xs">
{part.args.content}
</Text>
{part.args.tags && part.args.tags.length > 0 && (
<Group gap="xs" mt="xs">
{part.args.tags.map((tag: string, tagIdx: number) => (
<Text key={tagIdx} size="xs" c="blue.4">
#{tag}
</Text>
))}
</Group>
)}
</Paper>
);
}
return null;
})}
</Paper>
))}
{/* Typing indicator while AI is generating a response */}
{(status === 'submitted' || status === 'streaming') && (
<Paper
withBorder
shadow="md"
p="sm"
radius="lg"
style={{
alignSelf: 'flex-start',
backgroundColor: '#212529',
}}
w="80%"
>
<Text fw={700} size="sm">AI</Text>
<Group gap="xs" mt="xs">
<Loader size="xs" />
<Text size="sm" c="dimmed">Thinking...</Text>
</Group>
</Paper>
)}
</Stack>
</ScrollArea>
{/* Big Voice Mode Button - shown above text input */}
<Paper withBorder p="md" radius="xl" my="md">
<Stack gap="sm">
<Group gap="sm">
<Button
onClick={handleToggleVoiceMode}
size="xl"
radius="xl"
h={80}
style={{ flex: 1 }}
color={
voiceState === 'ai-speaking'
? 'blue'
: voiceState === 'user-speaking'
? 'green'
: voiceState === 'listening'
? 'yellow'
: voiceState === 'processing'
? 'blue'
: 'gray'
}
variant={voiceState !== 'idle' ? 'filled' : 'light'}
leftSection={
voiceState === 'ai-speaking' ? (
<IconVolume size={32} />
) : voiceState === 'user-speaking' || voiceState === 'listening' ? (
<IconMicrophone size={32} />
) : (
<IconMicrophone size={32} />
)
}
disabled={status === 'submitted' || status === 'streaming'}
>
{voiceState === 'idle'
? 'Start Voice Conversation'
: voiceState === 'listening'
? 'Listening... Start speaking'
: voiceState === 'user-speaking'
? silenceStartTimeRef.current
? `Speaking... (auto-submits in ${countdown.toFixed(1)}s)`
: 'Speaking... (will auto-submit after 3s silence)'
: voiceState === 'processing'
? 'Processing...'
: isGeneratingSpeech
? 'Generating speech...'
: 'AI is speaking... Please wait'}
</Button>
{/* Skip button - only shown when AI is speaking */}
{voiceState === 'ai-speaking' && (
<Button
onClick={skipAudioAndListen}
size="xl"
radius="xl"
h={80}
color="gray"
variant="outline"
>
Skip
</Button>
)}
</Group>
{/* Text Input - always available */}
<form onSubmit={handleSubmit}>
<Group>
<TextInput
value={input}
onChange={(e) => setInput(e.currentTarget.value)}
placeholder="Or type your thoughts here..."
style={{ flex: 1 }}
variant="filled"
disabled={voiceState !== 'idle'}
/>
<Button
type="submit"
radius="xl"
loading={status === 'submitted' || status === 'streaming'}
disabled={!input.trim() || voiceState !== 'idle'}
>
Send
</Button>
</Group>
</form>
</Stack>
</Paper>
</Container>
);
}

app/chat/page.tsx.old (new file)

@@ -0,0 +1,814 @@
'use client';
import { useChat } from '@ai-sdk/react';
import {
Stack,
TextInput,
Button,
Paper,
ScrollArea,
Title,
Container,
Group,
Text,
Loader,
ActionIcon,
Tooltip,
} from '@mantine/core';
import { useRef, useState, useEffect, useCallback } from 'react';
import { IconVolume, IconMicrophone, IconMicrophoneOff } from '@tabler/icons-react';
import { createActor } from 'xstate';
import { useSelector } from '@xstate/react';
import { appMachine } from '@/lib/app-machine';
import { UserMenu } from '@/components/UserMenu';
// Define the shape of the Deepgram transcript
interface DeepgramTranscript {
channel: {
alternatives: Array<{
transcript: string;
}>;
};
is_final: boolean;
speech_final: boolean;
}
/**
* Get the voice button text based on the current state tags.
* This replaces complex nested ternaries with a clean, readable function.
*/
function getVoiceButtonText(
state: ReturnType<typeof useSelector<typeof actorRef, any>>,
silenceStartTime: number | null
): string {
// Check tags in priority order and return appropriate text
let buttonText: string;
if (state.hasTag('textMode') || state.hasTag('voiceIdle')) {
buttonText = 'Start Voice Conversation';
} else if (state.hasTag('listening')) {
buttonText = 'Listening... Start speaking';
} else if (state.hasTag('userSpeaking')) {
buttonText = 'Speaking... (will auto-submit after 3s silence)';
} else if (state.hasTag('timingOut')) {
if (silenceStartTime) {
const elapsed = Date.now() - silenceStartTime;
const remaining = Math.max(0, 3 - elapsed / 1000);
buttonText = `Speaking... (auto-submits in ${remaining.toFixed(1)}s)`;
} else {
buttonText = 'Speaking... (timing out...)';
}
} else if (state.hasTag('processing')) {
buttonText = 'Processing...';
} else if (state.hasTag('aiGenerating')) {
buttonText = 'Generating speech...';
} else if (state.hasTag('aiSpeaking')) {
buttonText = 'AI is speaking... Please wait';
} else {
// Fallback (should never reach here if tags are properly defined)
buttonText = 'Start Voice Conversation';
console.warn('[Voice Mode] No matching tag found, using fallback text. Active tags:', state.tags);
}
console.log('[Voice Mode] Button text determined:', buttonText, 'Active tags:', Array.from(state.tags));
return buttonText;
}
export default function ChatPage() {
const viewport = useRef<HTMLDivElement>(null);
// XState machine for voice mode state management
const [actorRef] = useState(() => createActor(appMachine).start());
const state = useSelector(actorRef, (snapshot) => snapshot);
const send = actorRef.send.bind(actorRef);
// Imperative refs for managing side effects
const audioRef = useRef<HTMLAudioElement | null>(null);
const mediaRecorderRef = useRef<MediaRecorder | null>(null);
const socketRef = useRef<WebSocket | null>(null);
const silenceTimeoutRef = useRef<NodeJS.Timeout | null>(null);
const silenceStartTimeRef = useRef<number | null>(null);
const countdownIntervalRef = useRef<NodeJS.Timeout | null>(null);
const shouldCancelAudioRef = useRef<boolean>(false); // Flag to cancel pending audio operations
const { messages, sendMessage, setMessages, status } = useChat();
// Extract text from message (handles v5 parts structure)
const getMessageText = (msg: any): string => {
if ('parts' in msg && Array.isArray(msg.parts)) {
const textPart = msg.parts.find((p: any) => p.type === 'text');
return textPart?.text || '';
}
return msg.content || '';
};
// Handle AI response in voice conversation mode - SIMPLE VERSION
useEffect(() => {
if (!state.hasTag('processing')) return;
if (status !== 'ready') {
console.log('[Voice Mode] Waiting, status:', status);
return;
}
const transcript = state.context.transcript?.trim();
if (!transcript) return;
console.log('[Voice Mode] === PROCESSING ===');
console.log('[Voice Mode] Transcript:', transcript);
console.log('[Voice Mode] Messages:', messages.length);
// Get last 2 messages
const lastMsg = messages[messages.length - 1];
const secondLastMsg = messages[messages.length - 2];
console.log('[Voice Mode] Last msg:', lastMsg?.role, getMessageText(lastMsg || {}).substring(0, 30));
console.log('[Voice Mode] 2nd last msg:', secondLastMsg?.role, getMessageText(secondLastMsg || {}).substring(0, 30));
// Case 1: User message not submitted yet
// Check if the last message is the user's transcript
const userMessageExists = messages.some(m =>
m.role === 'user' && getMessageText(m) === transcript
);
if (!userMessageExists) {
console.log('[Voice Mode] → Submitting user message');
submitUserInput();
return;
}
// Case 2: User message submitted, check if AI has responded
// After user submits, if AI responds, the new AI message is LAST
if (lastMsg && lastMsg.role === 'assistant' &&
secondLastMsg && secondLastMsg.role === 'user' &&
getMessageText(secondLastMsg) === transcript) {
const aiMsg = lastMsg;
console.log('[Voice Mode] → AI response found:', aiMsg.id);
console.log('[Voice Mode] → Last spoken:', state.context.lastSpokenMessageId);
// Only play if we haven't played this message yet
if (state.context.lastSpokenMessageId !== aiMsg.id) {
const text = getMessageText(aiMsg);
console.log('[Voice Mode] → Playing:', text.substring(0, 50) + '...');
send({ type: 'AI_RESPONSE_READY', messageId: aiMsg.id, text });
playAudio(text, aiMsg.id);
} else {
console.log('[Voice Mode] → Already played, skipping');
}
return;
}
// Case 3: Waiting for AI response
console.log('[Voice Mode] → Waiting for AI response...');
}, [messages, state, status, send]);
// Stop all audio playback and cancel pending operations
const stopAllAudio = useCallback(() => {
console.log('[Voice Mode] Stopping all audio operations');
// Set cancel flag to prevent any pending audio from playing
shouldCancelAudioRef.current = true;
// Stop and clean up audio element
if (audioRef.current) {
audioRef.current.pause();
audioRef.current.currentTime = 0;
audioRef.current.src = '';
}
}, []);
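// Synthesize speech for an AI message via /api/tts, play it through a shared
// <audio> element, and resume listening once playback ends. shouldCancelAudioRef
// is re-checked after each async step so a skip or mode toggle can abort cleanly.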
const playAudio = async (text: string, messageId: string) => {
try {
// Reset cancel flag at the start of a new audio operation
shouldCancelAudioRef.current = false;
console.log('[Voice Mode] Generating speech for message:', messageId);
console.log('[Voice Mode] State transition:', state.value);
const response = await fetch('/api/tts', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ text }),
});
// Check if we should cancel before continuing
if (shouldCancelAudioRef.current) {
console.log('[Voice Mode] Audio generation canceled before blob creation');
return;
}
if (!response.ok) {
throw new Error('Failed to generate speech');
}
const audioBlob = await response.blob();
// Check again after async operation
if (shouldCancelAudioRef.current) {
console.log('[Voice Mode] Audio generation canceled after blob creation');
return;
}
const audioUrl = URL.createObjectURL(audioBlob);
// Create or reuse audio element
if (!audioRef.current) {
audioRef.current = new Audio();
}
audioRef.current.src = audioUrl;
audioRef.current.onended = () => {
URL.revokeObjectURL(audioUrl);
console.log('[Voice Mode] ✓ Finished playing audio, sending TTS_FINISHED event');
console.log('[Voice Mode] State transition:', state.value);
send({ type: 'TTS_FINISHED', messageId });
// After AI finishes speaking, go back to listening for user
startListening();
};
audioRef.current.onerror = () => {
URL.revokeObjectURL(audioUrl);
console.error('[Voice Mode] Error playing audio');
// On error, also go back to listening
startListening();
};
// Final check before playing
if (shouldCancelAudioRef.current) {
console.log('[Voice Mode] Audio playback canceled before play()');
URL.revokeObjectURL(audioUrl);
return;
}
await audioRef.current.play();
// Only send TTS_PLAYING if we haven't been canceled
if (!shouldCancelAudioRef.current) {
console.log('[Voice Mode] ✓ Playing audio, sending TTS_PLAYING event');
console.log('[Voice Mode] State transition:', state.value);
send({ type: 'TTS_PLAYING' });
} else {
console.log('[Voice Mode] Audio playback canceled after play()');
URL.revokeObjectURL(audioUrl);
}
} catch (error) {
console.error('[Voice Mode] Error:', error);
// On error, go back to listening
startListening();
}
};
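// Stop transcription (silence timers, MediaRecorder, Deepgram socket) and hand the
// accumulated transcript to the chat by programmatically submitting the form;
// if nothing was captured, go back to listening instead.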
const submitUserInput = useCallback(() => {
// Clear any pending silence timeout and countdown
if (silenceTimeoutRef.current) {
clearTimeout(silenceTimeoutRef.current);
silenceTimeoutRef.current = null;
}
if (countdownIntervalRef.current) {
clearInterval(countdownIntervalRef.current);
countdownIntervalRef.current = null;
}
silenceStartTimeRef.current = null;
// Stop recording
if (mediaRecorderRef.current) {
mediaRecorderRef.current.stop();
mediaRecorderRef.current = null;
}
if (socketRef.current) {
socketRef.current.close();
socketRef.current = null;
}
// Send the transcript as a message if we have one
const transcript = state.context.transcript;
if (transcript.trim()) {
console.log('[Voice Mode] Submitting transcript:', transcript);
console.log('[Voice Mode] State transition:', state.value);
setTimeout(() => {
const form = document.querySelector('form');
if (form) {
console.log('[Voice Mode] Form found, submitting...');
form.requestSubmit();
} else {
console.error('[Voice Mode] Form not found!');
}
}, 100);
} else {
// If no transcript, go back to listening
console.log('[Voice Mode] No transcript to submit, going back to listening');
startListening();
}
}, [state, send]);
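// Start a live transcription session: fetch a Deepgram key from /api/voice-token,
// stream 250ms microphone chunks over a realtime WebSocket, and translate results
// into machine events (interim text -> TRANSCRIPT_UPDATE, finalized phrase ->
// FINALIZED_PHRASE, then SILENCE_TIMEOUT after ~3 seconds of silence).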
const startListening = useCallback(async () => {
silenceStartTimeRef.current = null;
// Send event to enter listening state (which clears transcript/input/countdown)
console.log('[Voice Mode] Sending START_LISTENING event (implicitly via state transition)');
console.log('[Voice Mode] State transition:', state.value);
try {
// 1. Get the Deepgram API key
const response = await fetch('/api/voice-token', { method: 'POST' });
const data = await response.json();
if (data.error) {
throw new Error(data.error);
}
const { key } = data;
// 2. Access the microphone
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
// 3. Open direct WebSocket to Deepgram with voice activity detection
const socket = new WebSocket(
'wss://api.deepgram.com/v1/listen?interim_results=true&punctuate=true&vad_events=true',
['token', key]
);
socketRef.current = socket;
socket.onopen = () => {
console.log('[Voice Mode] ✓ WebSocket connected, listening for speech...');
console.log('[Voice Mode] State transition:', state.value);
// 4. Create MediaRecorder
const mediaRecorder = new MediaRecorder(stream, {
mimeType: 'audio/webm',
});
mediaRecorderRef.current = mediaRecorder;
// 5. Send audio chunks on data available
mediaRecorder.ondataavailable = (event) => {
if (event.data.size > 0 && socket.readyState === WebSocket.OPEN) {
socket.send(event.data);
}
};
// Start recording and chunking audio every 250ms
mediaRecorder.start(250);
};
// 6. Receive transcripts and handle silence detection
socket.onmessage = (event) => {
const data = JSON.parse(event.data) as DeepgramTranscript;
// Check if this message has alternatives (some Deepgram messages don't)
if (!data.channel?.alternatives) {
return; // Skip non-transcript messages (metadata, VAD events, etc.)
}
const transcript = data.channel.alternatives[0]?.transcript || '';
if (transcript) {
// User has started speaking
if (!state.context.hasStartedSpeaking) {
console.log('[Voice Mode] User started speaking, sending USER_STARTED_SPEAKING event');
console.log('[Voice Mode] State transition:', state.value);
send({ type: 'USER_STARTED_SPEAKING' });
}
// Clear any existing silence timeout and countdown
if (silenceTimeoutRef.current) {
clearTimeout(silenceTimeoutRef.current);
silenceTimeoutRef.current = null;
}
if (countdownIntervalRef.current) {
clearInterval(countdownIntervalRef.current);
countdownIntervalRef.current = null;
}
silenceStartTimeRef.current = null;
// Handle transcript updates
if (data.is_final) {
// This is a finalized phrase - send to machine
console.log('[Voice Mode] === FINALIZED PHRASE ===');
console.log('[Voice Mode] Transcript:', transcript);
console.log('[Voice Mode] state.value BEFORE:', JSON.stringify(state.value));
console.log('[Voice Mode] tags BEFORE:', Array.from(state.tags));
console.log('[Voice Mode] context BEFORE:', JSON.stringify(state.context));
console.log('[Voice Mode] Sending FINALIZED_PHRASE event');
send({ type: 'FINALIZED_PHRASE', phrase: transcript });
// Start a generous 3-second silence timer after each finalized phrase
silenceStartTimeRef.current = Date.now();
// Countdown bookkeeping every 100ms. Note: nothing here triggers a state
// update, so this interval does not re-render the UI on its own; the countdown
// shown on the voice button is derived from silenceStartTimeRef whenever the
// component re-renders for other reasons.
countdownIntervalRef.current = setInterval(() => {
if (silenceStartTimeRef.current) {
const elapsed = Date.now() - silenceStartTimeRef.current;
const remaining = Math.max(0, 3 - elapsed / 1000);
void remaining; // computed for clarity; not currently consumed anywhere
}
}, 100);
silenceTimeoutRef.current = setTimeout(() => {
console.log('[Voice Mode] 3 seconds of silence detected, sending SILENCE_TIMEOUT event');
console.log('[Voice Mode] State transition:', state.value);
send({ type: 'SILENCE_TIMEOUT' });
// Note: submitUserInput will be called by the processing state effect
}, 3000);
} else {
// This is an interim result - update display (send TRANSCRIPT_UPDATE)
const currentTranscript = state.context.transcript;
const displayText = currentTranscript
? currentTranscript + ' ' + transcript
: transcript;
send({ type: 'TRANSCRIPT_UPDATE', transcript: displayText });
}
}
};
socket.onclose = () => {
// Clean up stream
stream.getTracks().forEach((track) => track.stop());
console.log('[Voice Mode] WebSocket closed');
console.log('[Voice Mode] State transition:', state.value);
};
socket.onerror = (err) => {
console.error('[Voice Mode] WebSocket error:', err);
console.log('[Voice Mode] State transition:', state.value);
// On error, toggle back to text mode if we're in voice mode
if (!state.hasTag('textMode')) {
send({ type: 'TOGGLE_VOICE_MODE' });
}
};
} catch (error) {
console.error('[Voice Mode] Error starting listening:', error);
console.log('[Voice Mode] State transition:', state.value);
// On error, toggle back to text mode if we're in voice mode
if (!state.hasTag('textMode')) {
send({ type: 'TOGGLE_VOICE_MODE' });
}
}
}, [submitUserInput, state, send]);
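// Skip handler: cancel any in-flight TTS, notify the machine, and jump straight
// back to listening.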
const skipAudioAndListen = useCallback(() => {
console.log('[Voice Mode] === SKIP BUTTON CLICKED ===');
console.log('[Voice Mode] Current state.value:', JSON.stringify(state.value));
console.log('[Voice Mode] Current tags:', Array.from(state.tags));
// Stop ALL audio operations
stopAllAudio();
// Send skip event
send({ type: 'SKIP_AUDIO' });
// Go straight to listening
startListening();
}, [startListening, state, send, stopAllAudio]);
const handleToggleVoiceMode = useCallback(() => {
console.log('[Voice Mode] Voice button pressed, sending TOGGLE_VOICE_MODE event');
console.log('[Voice Mode] Current state:', state.value);
send({ type: 'TOGGLE_VOICE_MODE' });
}, [state, send]);
// Handle entering voice.idle state (after TOGGLE_VOICE_MODE from text mode)
useEffect(() => {
if (!state.hasTag('voiceIdle')) return;
console.log('[Voice Mode] Entered voice.idle, checking for AI message to read');
// Get ALL assistant messages in order
const assistantMessages = messages.filter((m) => m.role === 'assistant');
console.log('[Voice Mode] (idle) Found', assistantMessages.length, 'assistant messages');
if (assistantMessages.length === 0) {
console.log('[Voice Mode] (idle) No assistant messages, starting listening');
send({ type: 'START_LISTENING' });
startListening();
return;
}
// Get the LAST (most recent) assistant message
const latestAssistantMessage = assistantMessages[assistantMessages.length - 1];
console.log('[Voice Mode] (idle) Latest message ID:', latestAssistantMessage.id);
console.log('[Voice Mode] (idle) Last spoken message ID:', state.context.lastSpokenMessageId);
// Skip if we've already spoken this message
if (state.context.lastSpokenMessageId === latestAssistantMessage.id) {
console.log('[Voice Mode] (idle) Already spoke latest message, starting listening');
send({ type: 'START_LISTENING' });
startListening();
return;
}
// Extract text from the message
let text = '';
if ('parts' in latestAssistantMessage && Array.isArray((latestAssistantMessage as any).parts)) {
const textPart = (latestAssistantMessage as any).parts.find((p: any) => p.type === 'text');
text = textPart?.text || '';
}
if (text) {
// Play the most recent AI message first, then start listening
console.log('[Voice Mode] (idle) Reading latest AI message:', text.substring(0, 50) + '...');
send({ type: 'AI_RESPONSE_READY', messageId: latestAssistantMessage.id, text });
playAudio(text, latestAssistantMessage.id);
return;
}
// No text found, just start listening
console.log('[Voice Mode] (idle) No text in latest message, starting listening');
send({ type: 'START_LISTENING' });
startListening();
}, [state, messages, send]);
// Stop audio when leaving audio-related states
useEffect(() => {
const isInAudioState = state.hasTag('canSkipAudio');
if (!isInAudioState) {
// We're not in an audio state, make sure everything is stopped
stopAllAudio();
}
}, [state, stopAllAudio]);
// Log state transitions for debugging
useEffect(() => {
console.log('[Voice Mode] === STATE TRANSITION ===');
console.log('[Voice Mode] state.value:', JSON.stringify(state.value));
console.log('[Voice Mode] Active tags:', Array.from(state.tags));
console.log('[Voice Mode] Context:', JSON.stringify(state.context));
}, [state.value]);
// Add initial greeting message on first load
useEffect(() => {
if (messages.length === 0) {
setMessages([
{
id: 'initial-greeting',
role: 'assistant',
parts: [
{
type: 'text',
text: 'Welcome to Ponderants! I\'m here to help you explore and structure your ideas through conversation.\n\nWhat would you like to talk about today? I can adapt my interview style to best suit your needs (Socratic questioning, collaborative brainstorming, or other approaches).\n\nJust start sharing your thoughts, and we\'ll discover meaningful insights together.',
},
],
} as any,
]);
}
}, []);
// Auto-scroll to bottom
useEffect(() => {
viewport.current?.scrollTo({
top: viewport.current.scrollHeight,
behavior: 'smooth',
});
}, [messages]);
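// Text-mode submit: send the typed input (held in machine context) through
// sendMessage from useChat().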
const handleSubmit = (e: React.FormEvent) => {
e.preventDefault();
const inputText = state.context.input;
if (!inputText.trim() || status === 'submitted' || status === 'streaming') return;
console.log('[Voice Mode] Submitting message:', inputText);
console.log('[Voice Mode] State transition:', state.value);
sendMessage({ text: inputText });
// Clear input via machine context (will be cleared on next state transition)
};
const handleNewConversation = () => {
// Clear all messages and reset to initial greeting
setMessages([
{
id: 'initial-greeting',
role: 'assistant',
parts: [
{
type: 'text',
text: 'Welcome to Ponderants! I\'m here to help you explore and structure your ideas through conversation.\n\nWhat would you like to talk about today? I can adapt my interview style to best suit your needs (Socratic questioning, collaborative brainstorming, or other approaches).\n\nJust start sharing your thoughts, and we\'ll discover meaningful insights together.',
},
],
} as any,
]);
};
return (
<Container size="md" h="100vh" style={{ display: 'flex', flexDirection: 'column' }}>
<Group justify="space-between" py="md">
<Title order={2}>
Ponderants Interview
</Title>
<Group gap="md">
<Tooltip label="Start a new conversation">
<Button
variant="subtle"
onClick={handleNewConversation}
disabled={status === 'submitted' || status === 'streaming'}
>
New Conversation
</Button>
</Tooltip>
<UserMenu />
</Group>
</Group>
<ScrollArea
h="100%"
style={{ flex: 1 }}
viewportRef={viewport}
>
<Stack gap="md" pb="xl">
{messages.map((m) => (
<Paper
key={m.id}
withBorder
shadow="md"
p="sm"
radius="lg"
style={{
alignSelf: m.role === 'user' ? 'flex-end' : 'flex-start',
backgroundColor:
m.role === 'user' ? '#343a40' : '#212529',
}}
w="80%"
>
<Text fw={700} size="sm">{m.role === 'user' ? 'You' : 'AI'}</Text>
{/* Extract text from message parts */}
{(() => {
if ('parts' in m && Array.isArray((m as any).parts)) {
return (m as any).parts.map((part: any, i: number) => {
if (part.type === 'text') {
return (
<Text key={i} style={{ whiteSpace: 'pre-wrap' }}>
{part.text}
</Text>
);
}
return null;
});
}
return <Text>Message content unavailable</Text>;
})()}
</Paper>
))}
{/* Typing indicator while AI is generating a response */}
{(status === 'submitted' || status === 'streaming') && (
<Paper
withBorder
shadow="md"
p="sm"
radius="lg"
style={{
alignSelf: 'flex-start',
backgroundColor: '#212529',
}}
w="80%"
>
<Text fw={700} size="sm">AI</Text>
<Group gap="xs" mt="xs">
<Loader size="xs" />
<Text size="sm" c="dimmed">Thinking...</Text>
</Group>
</Paper>
)}
</Stack>
</ScrollArea>
{/* Big Voice Mode Button - shown above text input */}
<Paper withBorder p="md" radius="xl" my="md">
<Stack gap="sm">
<Group gap="sm">
<Button
onClick={handleToggleVoiceMode}
size="xl"
radius="xl"
h={80}
style={{ flex: 1 }}
color={
state.hasTag('canSkipAudio')
? 'blue'
: state.hasTag('userSpeaking') || state.hasTag('timingOut')
? 'green'
: state.hasTag('listening')
? 'yellow'
: state.hasTag('processing')
? 'blue'
: 'gray'
}
variant={!state.hasTag('textMode') && !state.hasTag('voiceIdle') ? 'filled' : 'light'}
leftSection={
state.hasTag('canSkipAudio') ? (
<IconVolume size={32} />
) : (
<IconMicrophone size={32} />
)
}
disabled={status === 'submitted' || status === 'streaming'}
>
{getVoiceButtonText(state, silenceStartTimeRef.current)}
</Button>
{/* Skip button - shown when audio can be skipped */}
{state.hasTag('canSkipAudio') && (
<Button
onClick={skipAudioAndListen}
size="xl"
radius="xl"
h={80}
color="gray"
variant="outline"
>
Skip
</Button>
)}
</Group>
{/* Test Controls - Development Only */}
{process.env.NODE_ENV === 'development' && (
<Paper withBorder p="sm" radius="md" style={{ backgroundColor: '#1a1b1e' }}>
<Stack gap="xs">
<Text size="xs" fw={700} c="dimmed">DEV: State Machine Testing</Text>
<Text size="xs" c="dimmed">
State: {JSON.stringify(state.value)} | Tags: {Array.from(state.tags).join(', ')}
</Text>
<Group gap="xs">
<Button
size="xs"
onClick={() => send({ type: 'START_LISTENING' })}
disabled={state.hasTag('textMode')}
>
Start Listening
</Button>
<Button
size="xs"
onClick={() => send({ type: 'USER_STARTED_SPEAKING' })}
disabled={!state.hasTag('listening')}
>
Simulate Speech
</Button>
<Button
size="xs"
onClick={() => {
send({ type: 'FINALIZED_PHRASE', phrase: 'Test message' });
}}
disabled={!state.hasTag('userSpeaking') && !state.hasTag('listening')}
>
Add Phrase
</Button>
<Button
size="xs"
onClick={() => send({ type: 'SILENCE_TIMEOUT' })}
disabled={!state.hasTag('timingOut')}
>
Trigger Timeout
</Button>
<Button
size="xs"
onClick={() => {
const testMsg = messages.filter(m => m.role === 'assistant')[0];
if (testMsg) {
const text = (testMsg as any).parts?.[0]?.text || 'Test AI response';
send({ type: 'AI_RESPONSE_READY', messageId: testMsg.id, text });
}
}}
disabled={!state.hasTag('processing')}
>
Simulate AI Response
</Button>
</Group>
</Stack>
</Paper>
)}
{/* Text Input - always available */}
<form onSubmit={handleSubmit}>
<Group>
<TextInput
value={state.context.input}
onChange={(e) => send({ type: 'TRANSCRIPT_UPDATE', transcript: e.currentTarget.value })}
placeholder="Or type your thoughts here..."
style={{ flex: 1 }}
variant="filled"
disabled={!state.hasTag('textMode') && !state.hasTag('voiceIdle')}
/>
<Button
type="submit"
radius="xl"
loading={status === 'submitted' || status === 'streaming'}
disabled={!state.context.input.trim() || (!state.hasTag('textMode') && !state.hasTag('voiceIdle'))}
>
Send
</Button>
</Group>
</form>
</Stack>
</Paper>
</Container>
);
}

app/edit/page.tsx Normal file
View File

@@ -0,0 +1,302 @@
'use client';
/**
* Edit Node Page
*
* Editor for reviewing and publishing node drafts generated from conversations.
* Displays the AI-generated draft and allows editing before publishing.
*/
import {
Stack,
Title,
Text,
Paper,
TextInput,
Textarea,
Button,
Group,
Container,
Divider,
Checkbox,
Badge,
Loader,
} from '@mantine/core';
import { useState, useEffect } from 'react';
import { IconDeviceFloppy, IconX, IconRefresh } from '@tabler/icons-react';
import { useAppMachine } from '@/hooks/useAppMachine';
import { useSelector } from '@xstate/react';
import { notifications } from '@mantine/notifications';
interface SuggestedNode {
id: string;
title: string;
body: string;
atp_uri: string;
score: number;
}
export default function EditPage() {
const appActor = useAppMachine();
const pendingDraft = useSelector(appActor, (state) => state.context.pendingNodeDraft);
const [title, setTitle] = useState('');
const [content, setContent] = useState('');
const [isPublishing, setIsPublishing] = useState(false);
const [suggestedNodes, setSuggestedNodes] = useState<SuggestedNode[]>([]);
const [selectedLinks, setSelectedLinks] = useState<string[]>([]);
const [isLoadingSuggestions, setIsLoadingSuggestions] = useState(false);
// Load draft when available
useEffect(() => {
if (pendingDraft) {
setTitle(pendingDraft.title);
setContent(pendingDraft.content);
}
}, [pendingDraft]);
// Fetch link suggestions when content changes
const fetchLinkSuggestions = async () => {
if (!content.trim() || content.trim().length < 50) {
setSuggestedNodes([]);
return;
}
setIsLoadingSuggestions(true);
try {
const response = await fetch('/api/suggest-links', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
credentials: 'include',
body: JSON.stringify({ body: content }),
});
if (!response.ok) {
throw new Error('Failed to fetch suggestions');
}
const suggestions = await response.json();
setSuggestedNodes(suggestions);
} catch (error) {
console.error('[Link Suggestions] Error:', error);
} finally {
setIsLoadingSuggestions(false);
}
};
// Auto-fetch suggestions when content is substantial
useEffect(() => {
const timer = setTimeout(() => {
if (content.trim().length >= 50) {
fetchLinkSuggestions();
}
}, 1000); // Debounce 1 second
return () => clearTimeout(timer);
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [content]); // fetchLinkSuggestions is recreated each render and closes over the latest content, so only `content` needs to trigger the effect
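// Publish the node: POST the title/body plus any selected related-node links to
// /api/nodes, surface the result as a notification, then return to the
// conversation view via the app machine.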
const handlePublish = async () => {
if (!title.trim() || !content.trim()) {
notifications.show({
title: 'Missing content',
message: 'Please provide both a title and content for your node',
color: 'red',
});
return;
}
setIsPublishing(true);
try {
const response = await fetch('/api/nodes', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
credentials: 'include', // Include cookies for authentication
body: JSON.stringify({
title: title.trim(),
body: content.trim(),
links: selectedLinks,
}),
});
if (!response.ok) {
const errorData = await response.json();
throw new Error(errorData.error || 'Failed to publish node');
}
const result = await response.json();
// Show success notification
const message = result.warning || 'Your node has been published to your Bluesky account';
notifications.show({
title: 'Node published!',
message,
color: result.warning ? 'yellow' : 'green',
});
// Transition back to conversation view
// (Galaxy view requires the cache, which may have failed)
appActor.send({
type: 'CANCEL_EDIT', // Go back to conversation
});
} catch (error) {
console.error('[Publish Node] Error:', error);
notifications.show({
title: 'Error',
message: error instanceof Error ? error.message : 'Failed to publish node',
color: 'red',
});
} finally {
setIsPublishing(false);
}
};
const handleCancel = () => {
if (pendingDraft) {
appActor.send({ type: 'CANCEL_EDIT' });
} else {
// Manual node creation - go back to conversation
appActor.send({ type: 'NAVIGATE_TO_CONVO' });
}
};
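// Toggle whether a suggested node will be included as a link when publishing.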
const toggleLinkSelection = (nodeId: string) => {
setSelectedLinks((prev) =>
prev.includes(nodeId)
? prev.filter((id) => id !== nodeId)
: [...prev, nodeId]
);
};
return (
<Container size="md" py="xl" style={{ height: '100vh', display: 'flex', flexDirection: 'column' }}>
<Stack gap="lg" style={{ flex: 1 }}>
<Group justify="space-between">
<Title order={2}>Edit Node</Title>
<Group gap="md">
<Button
variant="subtle"
color="gray"
leftSection={<IconX size={18} />}
onClick={handleCancel}
disabled={isPublishing}
>
Cancel
</Button>
<Button
variant="filled"
color="blue"
leftSection={<IconDeviceFloppy size={18} />}
onClick={handlePublish}
loading={isPublishing}
disabled={!title.trim() || !content.trim()}
>
Publish Node
</Button>
</Group>
</Group>
<Paper p="xl" withBorder style={{ flex: 1 }}>
<Stack gap="lg">
<TextInput
label="Title"
placeholder="Enter a concise, compelling title"
value={title}
onChange={(e) => setTitle(e.currentTarget.value)}
size="lg"
required
/>
<Divider />
<Textarea
label="Content"
placeholder="Write your node content in markdown..."
value={content}
onChange={(e) => setContent(e.currentTarget.value)}
minRows={15}
autosize
required
styles={{
input: {
fontFamily: 'monospace',
},
}}
/>
{/* Link Suggestions Section */}
{content.trim().length >= 50 && (
<>
<Divider />
<Stack gap="sm">
<Group justify="space-between">
<Title order={4}>Suggested Links</Title>
<Group gap="xs">
{isLoadingSuggestions && <Loader size="sm" />}
<Button
size="xs"
variant="subtle"
leftSection={<IconRefresh size={14} />}
onClick={fetchLinkSuggestions}
disabled={isLoadingSuggestions}
>
Refresh
</Button>
</Group>
</Group>
{suggestedNodes.length === 0 && !isLoadingSuggestions && (
<Text size="sm" c="dimmed">
No similar nodes found. This will be your first node on this topic!
</Text>
)}
{suggestedNodes.map((node) => (
<Paper key={node.id} p="sm" withBorder>
<Stack gap="xs">
<Group gap="xs">
<Checkbox
checked={selectedLinks.includes(node.id)}
onChange={() => toggleLinkSelection(node.id)}
/>
<div style={{ flex: 1 }}>
<Group justify="space-between">
<Text fw={600} size="sm">
{node.title}
</Text>
<Badge size="xs" variant="light">
{(node.score * 100).toFixed(0)}% similar
</Badge>
</Group>
<Text size="xs" c="dimmed" lineClamp={2}>
{node.body}
</Text>
</div>
</Group>
</Stack>
</Paper>
))}
</Stack>
</>
)}
{pendingDraft?.conversationContext && (
<>
<Divider />
<Paper p="md" withBorder style={{ backgroundColor: '#1a1b1e' }}>
<Text size="sm" fw={700} mb="sm">
Conversation Context
</Text>
<Text size="xs" c="dimmed" style={{ whiteSpace: 'pre-wrap' }}>
{pendingDraft.conversationContext}
</Text>
</Paper>
</>
)}
</Stack>
</Paper>
</Stack>
</Container>
);
}

View File

@@ -1,58 +1,19 @@
'use client';
import { Button, Box } from '@mantine/core';
import { Suspense, useState } from 'react';
import { Box, Text, Stack } from '@mantine/core';
import { Suspense } from 'react';
import { ThoughtGalaxy } from '@/components/ThoughtGalaxy';
import { notifications } from '@mantine/notifications';
export default function GalaxyPage() {
const [isCalculating, setIsCalculating] = useState(false);
// This key forces a re-render of the galaxy component
const [galaxyKey, setGalaxyKey] = useState(Date.now());
const handleCalculateGraph = async () => {
setIsCalculating(true);
try {
const response = await fetch('/api/calculate-graph', { method: 'POST' });
const data = await response.json();
if (!response.ok) {
throw new Error(data.error || 'Failed to calculate graph');
}
notifications.show({
title: 'Success',
message: data.message || `Mapped ${data.nodes_mapped} nodes to 3D space`,
color: 'green',
});
// Refresh the galaxy component by changing its key
setGalaxyKey(Date.now());
} catch (error) {
console.error(error);
notifications.show({
title: 'Error',
message: error instanceof Error ? error.message : 'Failed to calculate graph',
color: 'red',
});
} finally {
setIsCalculating(false);
}
};
return (
<Box style={{ height: '100vh', width: '100vw', position: 'relative' }}>
<Button
onClick={handleCalculateGraph}
loading={isCalculating}
style={{ position: 'absolute', top: 20, left: 20, zIndex: 10 }}
>
Calculate My Graph
</Button>
{/* R3F Canvas for the 3D visualization */}
<Suspense fallback={<Box>Loading 3D Scene...</Box>}>
<ThoughtGalaxy key={galaxyKey} />
<Suspense fallback={
<Stack align="center" justify="center" style={{ height: '100vh' }}>
<Text c="dimmed">Loading your thought galaxy...</Text>
</Stack>
}>
<ThoughtGalaxy />
</Suspense>
</Box>
);

View File

@@ -5,12 +5,17 @@ import { MantineProvider, ColorSchemeScript } from "@mantine/core";
import { Notifications } from "@mantine/notifications";
import "@mantine/notifications/styles.css";
import { theme } from "./theme";
import { AppLayout } from "@/components/AppLayout";
const inter = Inter({ subsets: ["latin"] });
export const metadata: Metadata = {
title: "Ponderants",
description: "Your AI Thought Partner",
icons: {
icon: "/logo.svg",
apple: "/logo.svg",
},
};
export default function RootLayout({
@@ -27,7 +32,7 @@ export default function RootLayout({
<body className={inter.className} suppressHydrationWarning>
<MantineProvider theme={theme} defaultColorScheme="dark">
<Notifications />
{children}
<AppLayout>{children}</AppLayout>
</MantineProvider>
</body>
</html>

View File

@@ -25,8 +25,6 @@ export const theme = createTheme({
// Set default dark mode and grayscale for the "minimalist" look
defaultRadius: 'md',
fontFamily: 'Inter, sans-serif',
// Enforce dark mode
forceColorScheme: 'dark',
// Set default component props for a consistent look
components: {