diff --git a/app/layout.tsx b/app/layout.tsx
index 0a4889b..6f93285 100644
--- a/app/layout.tsx
+++ b/app/layout.tsx
@@ -4,6 +4,7 @@
 import "./globals.css";
 import { MantineProvider, ColorSchemeScript } from "@mantine/core";
 import { Notifications } from "@mantine/notifications";
 import "@mantine/notifications/styles.css";
+import { Analytics } from "@vercel/analytics/react";
 import { theme } from "./theme";
 import { AppLayout } from "@/components/AppLayout";
@@ -43,6 +44,7 @@ export default function RootLayout({
           {children}
+          <Analytics />
   );
diff --git a/db/schema.surql b/db/schema.surql
index e229cda..d2c3691 100644
--- a/db/schema.surql
+++ b/db/schema.surql
@@ -68,9 +68,9 @@ DEFINE FIELD coords_3d ON TABLE node TYPE array
 
 -- Define the vector search index.
 -- We use MTREE (or HNSW) for high-performance k-NN search.
--- The dimension (768) MUST match the output of the
--- 'text-embedding-004' model.
-DEFINE INDEX node_embedding_idx ON TABLE node FIELDS embedding MTREE DIMENSION 768;
+-- The dimension (3072) MUST match the output of the
+-- 'gemini-embedding-001' model.
+DEFINE INDEX node_embedding_idx ON TABLE node FIELDS embedding MTREE DIMENSION 3072;
 
 -- --------------------------------------------------
 -- Relation: links_to
diff --git a/package.json b/package.json
index d80bde3..ad088ac 100644
--- a/package.json
+++ b/package.json
@@ -25,6 +25,7 @@
     "@react-three/drei": "latest",
     "@react-three/fiber": "latest",
     "@tabler/icons-react": "^3.35.0",
+    "@vercel/analytics": "^1.5.0",
     "@xstate/react": "^6.0.0",
     "ai": "latest",
     "jsonwebtoken": "latest",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 2ae933d..d543bc8 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -47,6 +47,9 @@ importers:
       '@tabler/icons-react':
         specifier: ^3.35.0
         version: 3.35.0(react@19.2.0)
+      '@vercel/analytics':
+        specifier: ^1.5.0
+        version: 1.5.0(next@16.0.1(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react@19.2.0)
       '@xstate/react':
         specifier: ^6.0.0
         version: 6.0.0(@types/react@19.2.2)(react@19.2.0)(xstate@5.24.0)
@@ -1295,6 +1298,32 @@ packages:
     peerDependencies:
       react: '>= 16.8.0'
 
+  '@vercel/analytics@1.5.0':
+    resolution: {integrity: sha512-MYsBzfPki4gthY5HnYN7jgInhAZ7Ac1cYDoRWFomwGHWEX7odTEzbtg9kf/QSo7XEsEAqlQugA6gJ2WS2DEa3g==}
+    peerDependencies:
+      '@remix-run/react': ^2
+      '@sveltejs/kit': ^1 || ^2
+      next: '>= 13'
+      react: ^18 || ^19 || ^19.0.0-rc
+      svelte: '>= 4'
+      vue: ^3
+      vue-router: ^4
+    peerDependenciesMeta:
+      '@remix-run/react':
+        optional: true
+      '@sveltejs/kit':
+        optional: true
+      next:
+        optional: true
+      react:
+        optional: true
+      svelte:
+        optional: true
+      vue:
+        optional: true
+      vue-router:
+        optional: true
+
   '@vercel/oidc@3.0.3':
     resolution: {integrity: sha512-yNEQvPcVrK9sIe637+I0jD6leluPxzwJKx/Haw6F4H77CdDsszUn5V3o96LPziXkSNE2B83+Z3mjqGKBK/R6Gg==}
     engines: {node: '>= 20'}
@@ -4591,6 +4620,11 @@ snapshots:
       '@use-gesture/core': 10.3.1
       react: 19.2.0
 
+  '@vercel/analytics@1.5.0(next@16.0.1(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react@19.2.0)':
+    optionalDependencies:
+      next: 16.0.1(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(@playwright/test@1.56.1)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)
+      react: 19.2.0
+
   '@vercel/oidc@3.0.3': {}
 
   '@webgpu/types@0.1.66': {}
diff --git a/todo.md b/todo.md
index 73dd622..3dc62c6 100644
--- a/todo.md
+++ b/todo.md
@@ -2,13 +2,4 @@
 
 Upcoming items that should be implemented (time-permitting):
 
-- a way to see the visualized version of all nodes in the db
-- let's call the "AI" "Mr. DJ" and link to this youtube video for its name:
-  https://www.youtube.com/watch?v=oEauWw9ZGrA
-- let's have "Ponderants" in the top-left corner with some sort of very minimal
-  svg that represents an abstraction of a human conversing with a robot (like
-  maybe four simple shapes max)
-- let's have, in the top-center, something that indicates we're in "Convo" mode
-- let's stream the AI output to deepgram for faster synthesis
-- we should link bluesky posts back to ponderants
-- long posts should be broken up into threads
+- stream the AI output to deepgram for faster synthesis