feat: Improve UI layout and navigation

- Increase logo size (48x48 desktop, 56x56 mobile) for better visibility
- Add logo as favicon
- Add logo to mobile header
- Move user menu to navigation bars (sidebar on desktop, bottom bar on mobile)
- Fix desktop chat layout - container structure prevents voice controls cutoff
- Fix mobile bottom bar - use icon-only ActionIcons instead of truncated text buttons
- Hide Create Node/New Conversation buttons on mobile to save header space
- Make fixed header and voice controls work properly with containers

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
2025-11-09 14:43:11 +00:00
parent 47b35b9caf
commit 0ed2d6c0b3
57 changed files with 6996 additions and 629 deletions

View File

@@ -1,40 +1,131 @@
import { test } from 'magnitude-test';
// E2E happy path: drive the full voice-conversation state machine end to end,
// from idle -> greeting -> listening -> speaking -> timeout submit -> AI TTS -> idle.
// NOTE(review): this block was reconstructed from a garbled diff in which the old
// "Start Recording" test's removed lines were interleaved with this test's added
// lines; the old-test residue has been dropped so the file parses again.
test('[Happy Path] User can have a full voice conversation with AI', async (agent) => {
  // Act: Navigate to chat page (assumes user is already authenticated)
  await agent.open('http://localhost:3000/chat');

  // Check: Initial state - voice button shows "Start Voice Conversation"
  await agent.check('A button with text "Start Voice Conversation" is visible');

  // Act: Click to start voice mode
  await agent.act('Click the "Start Voice Conversation" button');

  // Check: Button text changes to indicate checking or generating state
  // Could be "Checking for greeting..." or "Generating speech..." or "Listening..."
  await agent.check(
    'The button text has changed from "Start Voice Conversation" to indicate an active state'
  );

  // Act: If there's a Skip button visible (greeting is playing), click it
  await agent.act('Click the Skip button if it is visible');

  // Check: Should transition to listening state
  await agent.check('The button shows "Listening... Start speaking"');

  // Check: Development test controls should be visible (in dev mode)
  await agent.check('A section with text "DEV: State Machine Testing" is visible');

  // Act: Use dev button to simulate user starting to speak
  await agent.act('Click the "Simulate Speech" button in the dev controls');

  // Check: Button shows speaking state
  await agent.check('The button text contains "Speaking"');

  // Act: Add a phrase using the dev button
  await agent.act('Click the "Add Phrase" button in the dev controls');

  // Check: A message bubble appears showing the transcript being spoken
  await agent.check('A message with text "You (speaking...)" is visible');
  await agent.check('The message contains the text "Test message"');

  // Check: Button shows timing out state
  await agent.check('The button text contains "auto-submit"');

  // Act: Trigger the timeout using dev button
  await agent.act('Click the "Trigger Timeout" button in the dev controls');

  // Check: Button shows submitting or waiting state
  await agent.check('The button text contains "Submitting" or "Waiting for AI"');

  // Check: The user message appears in the chat
  await agent.check('A message with text "Test message" appears in the chat history');

  // Wait for AI response (this takes a few seconds)
  await agent.wait(10000);

  // Check: AI message appears
  await agent.check('An AI message appears in the chat');

  // Check: Button shows generating or playing TTS state
  await agent.check('The button text contains "Generating speech" or "AI is speaking"');

  // Check: Skip button is visible during TTS
  await agent.check('A "Skip" button is visible');

  // Act: Skip the AI audio
  await agent.act('Click the Skip button');

  // Check: Returns to listening state
  await agent.check('The button shows "Listening... Start speaking"');

  // Act: Stop voice mode
  await agent.act('Click the main voice button to stop');

  // Check: Returns to idle state
  await agent.check('The button shows "Start Voice Conversation"');
});
// E2E unhappy path: a failure while entering voice mode (e.g. the user denies
// microphone access) must surface an error and drop the UI back to idle,
// rather than leaving the button stuck in an active state.
test('[Unhappy Path] Voice mode handles errors gracefully', async (agent) => {
  await agent.open('http://localhost:3000/chat');

  // Enter voice mode, then force a failure before it can start listening.
  // NOTE(review): a real run needs getUserMedia mocked to reject — confirm harness support.
  await agent.act('Click the "Start Voice Conversation" button');
  await agent.act('Simulate a microphone permission error');

  // The user sees feedback, and the button returns to its idle label.
  await agent.check('An error message is shown to the user');
  await agent.check('The button shows "Start Voice Conversation"');
});
// E2E: the free-text chat input must be usable only while voice mode is off —
// it is disabled for the duration of a voice session and re-enabled afterwards.
test('[Happy Path] Text input is disabled during voice mode', async (agent) => {
  await agent.open('http://localhost:3000/chat');

  // Baseline: before voice mode starts, typing is allowed.
  await agent.check('The text input field "Or type your thoughts here..." is enabled');

  // While voice mode is active, the field is locked out.
  await agent.act('Click the "Start Voice Conversation" button');
  await agent.check('The text input field is disabled');

  // Leaving voice mode hands the field back to the user.
  await agent.act('Click the main voice button to stop');
  await agent.check('The text input field is enabled');
});
// E2E: with voice mode idle, plain typed chat still works — the submitted text
// shows up as a user message and eventually draws an AI reply.
test('[Happy Path] User can type a message while voice mode is idle', async (agent) => {
  await agent.open('http://localhost:3000/chat');

  // Compose and submit a message through the keyboard path.
  await agent.act('Type "This is a text message" into the text input field');
  await agent.act('Press Enter or click the Send button');

  // The submitted text is rendered on the user's side of the conversation.
  await agent.check('The message "This is a text message" appears as a user message');

  // Give the backend a few seconds to answer, then confirm the reply rendered.
  await agent.wait(5000);
  await agent.check('An AI response appears in the chat');
});