Compare commits


4 Commits

Author SHA1 Message Date
Enrico Ros 3a9a6b0273 Merge branch 'jondwillis-feature/auth' into jondwillis-feature/auth-merged 2023-05-01 23:44:07 -07:00
Enrico Ros 3b51c39fc3 Small bits 2023-05-01 22:13:34 -07:00
Enrico Ros 05293ba557 Merge branch 'feature/auth' of https://github.com/jondwillis/nextjs-chatgpt-app into jondwillis-feature/auth 2023-05-01 22:13:02 -07:00
jon d18d5323aa auth squash and rebase 2023-04-11 13:25:39 -07:00
499 changed files with 8450 additions and 39227 deletions
-38
View File
@@ -1,38 +0,0 @@
# big-AGI non-code files
/docs/
README.md
# Node build artifacts
/node_modules
/.pnp
.pnp.js
# next.js
/.next/
/out/
# production
/build
# versioning
.git/
.github/
# IDEs
.idea/
# debug
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.pnpm-debug.log*
# local env files
.env*.local
# vercel
.vercel
# typescript
*.tsbuildinfo
next-env.d.ts
+33
View File
@@ -0,0 +1,33 @@
# [Recommended for local deployments] Backend API key for OpenAI, so that users don't need one (UI > this > '')
OPENAI_API_KEY=
# [Not needed] Set the backend host for the OpenAI API, to enable platforms such as Helicone (UI > this > api.openai.com)
OPENAI_API_HOST=
# [Not needed] Sets the "OpenAI-Organization" header field to support organization users (UI > this > '')
OPENAI_API_ORG_ID=
# [Optional] Enables ElevenLabs credentials on the server side - for optional text-to-speech
ELEVENLABS_API_KEY=
ELEVENLABS_API_HOST=
ELEVENLABS_VOICE_ID=
# [Optional] Prodia credentials on the server side - for optional image generation
PRODIA_API_KEY=
# [Optional, Search] Google Cloud API Key
# https://console.cloud.google.com/apis/credentials -
GOOGLE_CLOUD_API_KEY=
# [Optional, Search] Google Custom/Programmable Search Engine ID
# https://programmablesearchengine.google.com/
GOOGLE_CSE_ID=
# see docs/authentication.md to configure this section
AUTH_TYPE=
# [At least one required if AUTH_TYPE == credential] You may declare credentials for users from 0 to 99.
AUTH_USER_0=
AUTH_PASSWORD_0=
# [Required if AUTH_TYPE == credential and not in development mode] See: https://next-auth.js.org/configuration/options#nextauth_url
NEXTAUTH_URL=
# [Required if AUTH_TYPE == credential] See: https://next-auth.js.org/configuration/options#secret
NEXTAUTH_SECRET=
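For orientation, this is roughly how a backend could collect the numbered credential pairs declared above — a minimal sketch that only assumes the `AUTH_USER_n` / `AUTH_PASSWORD_n` naming from this file; the helper itself is illustrative and not part of the codebase:

```typescript
// Collect AUTH_USER_n / AUTH_PASSWORD_n pairs from the environment (n = 0..99).
// Illustrative sketch only, not the project's actual loader.
interface Credential { username: string; password: string }

function loadCredentials(env: NodeJS.ProcessEnv = process.env): Credential[] {
  const credentials: Credential[] = [];
  for (let i = 0; i < 100; i++) {
    const username = env[`AUTH_USER_${i}`];
    const password = env[`AUTH_PASSWORD_${i}`];
    // indices do not need to be contiguous, so skip gaps instead of stopping early
    if (username && password) credentials.push({ username, password });
  }
  return credentials;
}
```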
-13
View File
@@ -1,13 +0,0 @@
# These are supported funding model platforms
github: enricoros # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
-32
View File
@@ -1,32 +0,0 @@
name: 🐞 Bug Report
description: Create a report to help us improve
title: '[BUG]'
labels: [ 'type: bug' ]
body:
- type: markdown
attributes:
value: Thank you for reporting a bug.
- type: textarea
attributes:
label: Description
description: (required) Please provide a clear description. Please also provide the steps to reproduce.
placeholder: 'Concise description + steps to reproduce.'
validations:
required: true
- type: textarea
attributes:
label: Device and browser
description: '(required) Please specify your Mobile/Desktop device, OS version, browser.'
placeholder: 'Device: (e.g., iPhone 16, Pixel 9, PC, Macbook...), OS: (e.g., iOS 17, Windows 12), Browser: (e.g., Chrome 119, Safari 18, Firefox..)'
validations:
required: true
- type: textarea
attributes:
label: Screenshots and more
placeholder: 'Attach screenshots, or add any additional context here.'
- type: checkboxes
attributes:
label: Willingness to Contribute
description: We appreciate contributions - would you be willing to submit a pull request?
options:
- label: '🙋‍♂️ Yes, I would like to contribute a fix.'
@@ -1,110 +0,0 @@
---
name: Maintainers-Release
about: Maintainers
title: Release 1.2.3
labels: ''
assignees: enricoros
---
## Release checklist:
- [x] Create a new [Release Issue](https://github.com/enricoros/big-AGI/issues/new?assignees=enricoros&projects=enricoros/4&template=maintainers-release.md&title=Release+1.2.3)
- [ ] Replace 1.1.0 with the _former_ release, and _1.2.3_ with THIS
- [ ] Update the [Roadmap](https://github.com/users/enricoros/projects/4/views/2) calling out shipped features
- [ ] Create and update a [Milestone](https://github.com/enricoros/big-agi/milestones) for the release
- [ ] Assign this task
- [ ] Assign all the shipped roadmap Issues
- [ ] Assign the relevant [recently closed Issues](https://github.com/enricoros/big-agi/issues?q=is%3Aclosed+sort%3Aupdated-desc)
- Code changes:
- [ ] Create a release branch 'release-x.y.z': `git checkout -b release-1.2.3`
- [ ] Create a temporary tag `git tag v1.2.3 && git push opensource --tags`
- [ ] Create a [New Draft GitHub Release](https://github.com/enricoros/big-agi/releases/new), and generate the automated changelog (for new contributors)
- [ ] Update the release version in package.json, and `npm i`
- [ ] Update in-app News [src/apps/news/news.data.tsx](/src/apps/news/news.data.tsx)
- [ ] Update the in-app News version number
- [ ] Update the README.md with the new release
- [ ] Copy the highlights to the [docs/changelog.md](/docs/changelog.md)
- Release:
- [ ] merge onto main `git checkout main && git merge --no-ff release-1.2.3`
- [ ] re-tag `git tag -f v1.2.3 && git push opensource --tags -f`
- [ ] verify deployment on Vercel
- [ ] verify container on GitHub Packages
- [ ] update the GitHub release
- [ ] push as stable `git push opensource main:main-stable`
- Announce:
- [ ] Discord announcement
- [ ] Twitter announcement
### Links
- Milestone: https://github.com/enricoros/big-AGI/milestone/X
- GitHub release: https://github.com/enricoros/big-AGI/releases/tag/v1.2.3
- Former release task: #...
## Artifacts Generation
```markdown
You help me generate the following collateral for the new release of my opensource application called big-AGI. The new release is 1.2.3.
To familiarize yourself with the application, the following are the Website and the GitHub README.md.
```
- paste the URL: https://big-agi.com
- drag & drop: [README.md](https://raw.githubusercontent.com/enricoros/big-AGI/main/README.md)
```markdown
I am announcing a new version, 1.2.3.
For reference, the following was the collateral for 1.1.0 (Discord announcement, GitHub Release, in-app-news file news.data.tsx).
```
- paste the former: `discord announcement`,
- `GitHub release`,
- `news.data.tsx`,
- `changelog.md`
```markdown
The following are the new developments for 1.2.3:
- ...
- git log --pretty=format:"%h %an %B" v1.1.0..v1.2.3 | clip
```
- paste the link to the milestone (closed) and each individual issue (content will be downloaded)
- paste the output of the git log command
### news.data.tsx
```markdown
I need the following from you:
1. a table summarizing all the new features in 1.2.3 with the following columns: 4 words description (exactly what it is), short description, usefulness (what it does for the user), significance, link to the issue number (not the commit), which will be used for the artifacts later
2. then double-check the git log to see if there are any features of significance that are not in the table
3. then score each feature in terms of importance for users (1-10), relative impact of the feature (1-10, where 10 applies to the broadest user base), and novelty and uniqueness (1-10, where 10 is truly unique and novel from what exists already)
4. then improve the table, in decreasing order of importance for features, fixing any detail that's missing, in particular check if there are commits of significance from a user or developer point of view, which are not contained in the table
5. then I want you then to update the news.data.tsx for the new release
```
### Readme (and Changelog)
```markdown
I need you to update the README.md and the changelog with the new release.
Attaching the in-app news, with my language for you to improve on, but keep the tone.
```
### GitHub release
```markdown
Please create the 1.2.3 Release Notes for GitHub, following the format of the 1.1.0 GitHub release notes attached before.
Use a truthful and honest tone, understanding that people's time and attention span is short.
Today is 2024-XXXX-YYYY.
```
Now paste/attach the former release notes (or the 1.5.0 notes, which were accurate and great), including the new contributors and
some stats (# of commits, etc.), and roll it forward for the new release.
### Discord announcement
```markdown
Can you generate my 1.2.3 big-AGI discord announcement from the GitHub Release announcement?
Please keep the formatting and style of the discord announcement for 1.1.0, but with the new messaging above.
```
-18
View File
@@ -1,18 +0,0 @@
---
name: Roadmap request
about: Suggest a roadmap item
title: "[Roadmap]"
labels: ''
assignees: ''
---
**Why**
(replace this text with yours) The reason behind the request - we love it to be framed as "users will be able to do x" rather than quick-aging hype-tech-of-the-day requests
**Description**
Clear and concise description of what you want to happen.
**Requirements**
If you can, please break down the change into use cases, UX, technology, architecture, etc.
- [ ] ...
+4 -14
View File
@@ -7,15 +7,11 @@
# To get a newer version, you will need to update the SHA.
# You can also reference a tag or branch, but the action may change without warning.
name: Create and publish Docker images
name: Create and publish a Docker image
on:
push:
branches:
- main
#- main-stable # Disabled as the v* tag is used for stable releases
tags:
- 'v*' # Trigger on version tags (e.g., v1.7.0)
branches: ['main']
env:
REGISTRY: ghcr.io
@@ -30,7 +26,7 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Log in to the Container registry
uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1
@@ -44,17 +40,11 @@ jobs:
uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=raw,value=development,enable=${{ github.ref == 'refs/heads/main' }}
type=raw,value=stable,enable=${{ github.ref == 'refs/heads/main-stable' }}
type=ref,event=tag # Use the tag name as a tag for tag builds
type=semver,pattern={{version}} # Generate semantic versioning tags for tag builds
- name: Build and push Docker image
uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4
with:
context: .
file: Dockerfile
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
labels: ${{ steps.meta.outputs.labels }}
+1 -2
View File
@@ -26,8 +26,7 @@ yarn-error.log*
.pnpm-debug.log*
# local env files
.env
.env.*
.env*.local
# vercel
.vercel
+1 -2
View File
@@ -1,7 +1,6 @@
{
"singleAttributePerLine": false,
"singleQuote": true,
"trailingComma": "all",
"endOfLine": "lf",
"printWidth": 160
}
}
+24 -39
View File
@@ -1,56 +1,41 @@
# Base
FROM node:18-alpine AS base
ENV NEXT_TELEMETRY_DISABLED 1
# Test
FROM node:18-alpine as test-target
ENV NODE_ENV=development
ENV PATH $PATH:/usr/src/app/node_modules/.bin
# Dependencies
FROM base AS deps
WORKDIR /app
WORKDIR /usr/src/app
# Dependency files
COPY package*.json ./
COPY prisma ./prisma
# Install dependencies, including dev (release builds should use npm ci)
ENV NODE_ENV development
RUN npm ci
# CI and release builds should use npm ci to fully respect the lockfile.
# Local development may use npm install for opportunistic package updates.
ARG npm_install_command=ci
RUN npm $npm_install_command
# Builder
FROM base AS builder
WORKDIR /app
# Copy development deps and source
COPY --from=deps /app/node_modules ./node_modules
COPY . .
# Build the application
ENV NODE_ENV production
# Build
FROM test-target as build-target
ENV NODE_ENV=production
# Use build tools, installed as development packages, to produce a release build.
RUN npm run build
# Reduce installed packages to production-only
# Reduce installed packages to production-only.
RUN npm prune --production
# Runner
FROM base AS runner
WORKDIR /app
# Archive
FROM node:18-alpine as archive-target
ENV NODE_ENV=production
ENV PATH $PATH:/usr/src/app/node_modules/.bin
# As user
RUN addgroup --system --gid 1001 nodejs
RUN adduser --system --uid 1001 nextjs
WORKDIR /usr/src/app
# Copy Built app
COPY --from=builder --chown=nextjs:nodejs /app/public public
COPY --from=builder --chown=nextjs:nodejs /app/.next .next
COPY --from=builder --chown=nextjs:nodejs /app/node_modules node_modules
# Minimal ENV for production
ENV NODE_ENV production
ENV PATH $PATH:/app/node_modules/.bin
# Run as non-root user
USER nextjs
# Include only the release build and production packages.
COPY --from=build-target /usr/src/app/node_modules node_modules
COPY --from=build-target /usr/src/app/.next .next
# Expose port 3000 for the application to listen on
EXPOSE 3000
# Start the application
CMD ["next", "start"]
CMD ["next", "start"]
+1 -1
View File
@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2023-2024 Enrico Ros
Copyright (c) 2023 Enrico Ros
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
+84 -119
View File
@@ -1,85 +1,37 @@
# BIG-AGI 🧠✨
# `BIG-AGI` 🤖💬
Welcome to big-AGI 👋, the GPT application for professionals who need function, form,
simplicity, and speed. Powered by the latest models from 11 vendors and
open-source model servers, `big-AGI` offers best-in-class Voice and Chat with AI Personas,
visualizations, coding, drawing, calling, and much more -- all in a polished UX.
Welcome to `big-AGI`, FKA `nextjs-chatgpt-app`. 👋🎉
Personal AGI App, powered by `OpenAI GPT-4` and beyond. Designed for smart humans and super-heroes,
this responsive web app comes with Personas, Drawing, Code Execution, PDF imports, Voice support,
data Rendering, AGI functions, chats and more. Show your friends some `#big-AGI-energy` 🚀
Pros use big-AGI. 🚀 Developers love big-AGI. 🤖
[![Official Website](https://img.shields.io/badge/BIG--AGI.com-%23096bde?style=for-the-badge&logo=vercel&label=launch)](https://big-agi.com)
[![Official Website](https://img.shields.io/badge/BIG--AGI.com-%23096bde?style=for-the-badge&logo=vercel&label=demo)](https://big-agi.com)
Or fork & run on Vercel
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-agi&env=OPENAI_API_KEY&envDescription=Backend%20API%20keys%2C%20optional%20and%20may%20be%20overridden%20by%20the%20UI.&envLink=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-AGI%2Fblob%2Fmain%2Fdocs%2Fenvironment-variables.md&project-name=big-agi)
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-agi&env=OPENAI_API_KEY,OPENAI_API_HOST&envDescription=OpenAI%20KEY%20for%20your%20deployment.%20Set%20HOST%20only%20if%20non-default.)
## 👉 [roadmap](https://github.com/users/enricoros/projects/4/views/2)
## Useful 👊
big-AGI is an open book; our **[public roadmap](https://github.com/users/enricoros/projects/4/views/2)**
shows the current developments and future ideas.
![Ask away, paste a ton, copy the gems](docs/pixels/big-AGI-compo1.png)
- Got a suggestion? [_Add your roadmap ideas_](https://github.com/enricoros/big-agi/issues/new?&template=roadmap-request.md)
- Want to contribute? [_Pick up a task!_](https://github.com/users/enricoros/projects/4/views/4) - _easy_ to _pro_
- Engaging AI Personas
- Clean UX, w/ tokens counters
- Privacy: user-owned API keys and localStorage
- Human I/O: Advanced voice support (TTS, STT)
- Machine I/O: PDF import & Summarization, code execution
- Many more updates & integrations: ElevenLabs, Helicone, Paste.gg, Prodia
- Coming up: automatic-AGI reasoning
### What's New in 1.13.0 · Feb 8, 2024 · Multi + Mind
https://github.com/enricoros/big-AGI/assets/32999/01732528-730e-41dc-adc7-511385686b13
- **Side-by-Side Split Windows**: multitask with parallel conversations. [#208](https://github.com/enricoros/big-AGI/issues/208)
- **Multi-Chat Mode**: message everyone, all at once. [#388](https://github.com/enricoros/big-AGI/issues/388)
- **Export tables as CSV** - big thanks to @aj47. [#392](https://github.com/enricoros/big-AGI/pull/392)
- **Adjustable Text Size**: enjoy denser chats. [#399](https://github.com/enricoros/big-AGI/issues/399)
- Dev2 Persona Technology Preview
- Better looking chats with improved spacing, fonts, and menus
- More: new video player, [LM Studio tutorial](https://github.com/enricoros/big-AGI/blob/main/docs/config-lmstudio.md), [MongoDB support](https://github.com/enricoros/big-AGI/blob/main/docs/config-database.md) (thanks @ranfysvalle02), and speedups
### What's New in 1.12.0 · Jan 26, 2024 · AGI Hotline
https://github.com/enricoros/big-AGI/assets/32999/95ceb03c-945d-4fdd-9a9f-3317beb54f3f
- **Voice Calls**: real-time voice call your personas out of the blue or in relation to a chat [#354](https://github.com/enricoros/big-AGI/issues/354)
- Support **OpenAI 0125** Models. [#364](https://github.com/enricoros/big-AGI/issues/364)
- Rename or Auto-Rename chats. [#222](https://github.com/enricoros/big-AGI/issues/222), [#360](https://github.com/enricoros/big-AGI/issues/360)
- More control over **Link Sharing** [#356](https://github.com/enricoros/big-AGI/issues/356)
- **Accessibility** to screen readers [#358](https://github.com/enricoros/big-AGI/issues/358)
- Export chats to Markdown [#337](https://github.com/enricoros/big-AGI/issues/337)
- Paste tables from Excel [#286](https://github.com/enricoros/big-AGI/issues/286)
- Ollama model updates and context window detection fixes [#309](https://github.com/enricoros/big-AGI/issues/309)
### What's New in 1.11.0 · Jan 16, 2024 · Singularity
https://github.com/enricoros/big-AGI/assets/1590910/a6b8e172-0726-4b03-a5e5-10cfcb110c68
- **Find chats**: search in titles and content, with frequency ranking. [#329](https://github.com/enricoros/big-AGI/issues/329)
- **Commands**: command auto-completion (type '/'). [#327](https://github.com/enricoros/big-AGI/issues/327)
- **[Together AI](https://www.together.ai/products#inference)** inference platform support (good speed and newer models). [#346](https://github.com/enricoros/big-AGI/issues/346)
- Persona Creator history, deletion, custom creation, fix llm API timeouts
- Enable adding up to five custom OpenAI-compatible endpoints
- Developer enhancements: new 'Actiles' framework
For full details and former releases, check out the [changelog](docs/changelog.md).
## ✨ Key Features 👊
![big-AGI screenshot](docs/pixels/big-AGI-compo-20240201_small.png)
- **AI Personas**: Tailor your AI interactions with customizable personas
- **Sleek UI/UX**: A smooth, intuitive, and mobile-responsive interface
- **Efficient Interaction**: Voice commands, OCR, and drag-and-drop file uploads
- **Multiple AI Models**: Choose from a variety of leading AI providers
- **Privacy First**: Self-host and use your own API keys for full control
- **Advanced Tools**: Execute code, import PDFs, and summarize documents
- **Seamless Integrations**: Enhance functionality with various third-party services
- **Open Roadmap**: Contribute to the progress of big-AGI
## 💖 Support
## Support 🙌
[//]: # ([![Official Discord](https://img.shields.io/discord/1098796266906980422?label=discord&logo=discord&logoColor=%23fff&style=for-the-badge)](https://discord.gg/MkH4qj2Jp9))
[![Official Discord](https://discordapp.com/api/guilds/1098796266906980422/widget.png?style=banner2)](https://discord.gg/MkH4qj2Jp9)
* Enjoy the hosted open-source app on [big-AGI.com](https://big-agi.com)
* [Chat with us](https://discord.gg/MkH4qj2Jp9)
* Deploy your [fork](https://github.com/enricoros/big-agi/fork) for your friends and family
* Enjoy the hosted open-source app on [big-AGI.com](https://get.big-agi.com)
* [Chat with us](https://discord.gg/MkH4qj2Jp9). We just started!
* Deploy your [fork](https://github.com/enricoros/big-agi/fork) and surprise your friends with big-GPT
energy!
* send PRs! ...
🎭[Editing Personas](https://github.com/enricoros/big-agi/issues/35),
🧩[Reasoning Systems](https://github.com/enricoros/big-agi/issues/36),
@@ -88,14 +40,70 @@ For full details and former releases, check out the [changelog](docs/changelog.m
<br/>
## 🧩 Develop
## Latest Drops 🚀
#### 🚨 May: mature #big-agi-energy
- 🎉 **Authentication** basic user authentication framework
#### April: #big-agi-energy grows
- 🎉 **[Google Search](docs/pixels/feature_react_google.png)** active in ReAct - add your keys to Settings > Google Search
- 🎉 **[Reason+Act](docs/pixels/feature_react_turn_on.png)** preview feature - activate with 2-taps on the 'Chat' button
- 🎉 **[Image Generation](docs/pixels/feature_imagine_command.png)** using Prodia (BYO Keys) - /imagine - or menu option
- 🎉 **[Voice Synthesis](docs/pixels/feature_voice_1.png)** 📣 with ElevenLabs, including selection of custom voices
- 🎉 **[Precise Token Counter](docs/pixels/feature_token_counter.png)** 📈 extra-useful to pack the context window
- 🎉 **[Install Mobile APP](docs/pixels/feature_pwa.png)** 📲 looks like native (@harlanlewis)
- 🎉 **[UI language](docs/pixels/feature_language.png)** with auto-detect, and future app language! (@tbodyston)
- 🎉 **PDF Summarization** 🧩🤯 - ask questions to a PDF! (@fredliubojin)
- 🎉 **Code Execution: [Codepen](https://codepen.io/)/[Replit](https://replit.com/)** 💻 (@harlanlewis)
- 🎉 **[SVG Drawing](docs/pixels/feature_svg_drawing.png)** - draw with AI 🎨
- 🎉 Chats: multiple chats, AI titles, Import/Export, Selection mode
- 🎉 Rendering: Markdown, SVG, improved Code blocks
- 🎉 Integrations: OpenAI organization ID
- 🎉 [Cloudflare deployment instructions](docs/deploy-cloudflare.md),
[awesome-agi](https://github.com/enricoros/awesome-agi)
- 🎉 [Typing Avatars](docs/pixels/gif_typing_040123.gif) ⌨️
<!-- p><a href="docs/pixels/gif_typing_040123.gif"><img src="docs/pixels/gif_typing_040123.gif" width='700' alt="New Typing Avatars"/></a></p -->
#### March: first release
- 🎉 **[AI Personas](docs/pixels/feature_purpose_two.png)** - including Code, Science, Corporate, and Chat 🎭
- 🎉 **Privacy**: user-owned API keys 🔑 and localStorage 🛡️
- 🎉 **Context** - Attach or [Drag & Drop files](docs/pixels/feature_drop_target.png) to add them to the prompt 📁
- 🎉 **Syntax highlighting** - for multiple languages 🌈
- 🎉 **Code Execution: Sandpack** - [now on branch](https://github.com/enricoros/big-agi/commit/f678a0d463d5e9cf0733f577e11bd612b7902d89) `variant-code-execution`
- 🎉 Chat with GPT-4 and 3.5 Turbo 🧠💨
- 🎉 Real-time streaming of AI responses ⚡
- 🎉 **Voice Input** 🎙️ - works great on Chrome / Windows
- 🎉 Integration: **[Paste.gg](docs/pixels/feature_paste_gg.png)** integration for chat sharing 📥
- 🎉 Integration: **[Helicone](https://www.helicone.ai/)** integration for API observability 📊
- 🌙 Dark mode - Wide mode ⛶
<br/>
### Basic Authentication for public deployments 🔐
To protect the web app owner from incurring unauthorized costs when deploying the app with a backend API key (`OPENAI_API_KEY`), you can [set up basic authentication](/docs/authentication.md).
## Why this? 💡
Because the official Chat ___lacks important features___, is ___more limited than the api___, at times
___slow or unavailable___, and you cannot deploy it yourself, remix it, add features, or share it with
your friends.
Our users report that ___big-AGI is faster___, ___more reliable___, and ___feature-rich___,
with the features that matter to them.
![Much features, so fun](docs/pixels/big-AGI-compo2b.png)
## Code 🧩
![TypeScript](https://img.shields.io/badge/TypeScript-007ACC?style=&logo=typescript&logoColor=white)
![React](https://img.shields.io/badge/React-61DAFB?style=&logo=react&logoColor=black)
![Next.js](https://img.shields.io/badge/Next.js-000000?style=&logo=vercel&logoColor=white)
Clone this repo, install the dependencies (all locally), and run the development server (which auto-watches the
files for changes):
Clone this repo, install the dependencies, and run the development server:
```bash
git clone https://github.com/enricoros/big-agi.git
@@ -104,55 +112,12 @@ npm install
npm run dev
```
The development app will be running on `http://localhost:3000`. Development builds have the advantage of not requiring
a build step, but can be slower than production builds. Also, development builds won't have timeouts on edge functions.
Now the app should be running on `http://localhost:3000`
## 🌐 Deploy manually
### Integrations:
The _production_ build of the application is optimized for performance and is performed by the `npm run build` command,
after installing the required dependencies.
```bash
# .. repeat the steps above up to `npm install`, then:
npm run build
next start --port 3000
```
The app will be running on the specified port, e.g. `http://localhost:3000`.
Want to deploy with username/password? See the [Authentication](docs/deploy-authentication.md) guide.
## 🐳 Deploy with Docker
For more detailed information on deploying with Docker, please refer to the [docker deployment documentation](docs/deploy-docker.md).
Build and run:
```bash
docker build -t big-agi .
docker run -d -p 3000:3000 big-agi
```
Or run the official container:
- manually: `docker run -d -p 3000:3000 ghcr.io/enricoros/big-agi`
- or, with docker-compose: `docker-compose up` or see [the documentation](docs/deploy-docker.md) for a compose file with integrated browsing
## ☁️ Deploy on Cloudflare Pages
Please refer to the [Cloudflare deployment documentation](docs/deploy-cloudflare.md).
## 🚀 Deploy on Vercel
Create your GitHub fork, create a Vercel project over that fork, and deploy it. Or press the button below for convenience.
[![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-agi&env=OPENAI_API_KEY&envDescription=Backend%20API%20keys%2C%20optional%20and%20may%20be%20overridden%20by%20the%20UI.&envLink=https%3A%2F%2Fgithub.com%2Fenricoros%2Fbig-AGI%2Fblob%2Fmain%2Fdocs%2Fenvironment-variables.md&project-name=big-agi)
## Integrations:
* Local models: Ollama, Oobabooga, LocalAi, etc.
* [ElevenLabs](https://elevenlabs.io/) Voice Synthesis (bring your own voice too) - Settings > Text To Speech
* [Helicone](https://www.helicone.ai/) LLM Observability Platform - Models > OpenAI > Advanced > API Host: 'oai.hconeai.com'
* [Helicone](https://www.helicone.ai/) LLM Observability Platform - Settings > Advanced > API Host: 'oai.hconeai.com'
* [Paste.gg](https://paste.gg/) Paste Sharing - Chat Menu > Share via paste.gg
* [Prodia](https://prodia.com/) Image Generation - Settings > Image Generation > Api Key & Model
@@ -167,4 +132,4 @@ This project is licensed under the MIT License.
[//]: # ([![GitHub issues]&#40;https://img.shields.io/github/issues/enricoros/big-agi&#41;]&#40;https://github.com/enricoros/big-agi/issues&#41;)
Made with 💙
Made with 💙
-52
View File
@@ -1,52 +0,0 @@
import { createEmptyReadableStream, safeErrorString, serverFetchOrThrow } from '~/server/wire';
import { elevenlabsAccess, elevenlabsVoiceId, ElevenlabsWire, speechInputSchema } from '~/modules/elevenlabs/elevenlabs.router';
/* NOTE: Why does this file even exist?
This file is a workaround for a limitation in tRPC; it does not support ArrayBuffer responses,
and that would force us to use base64 encoding for the audio data, which would be a waste of
bandwidth. So instead, we use this file to make the request to ElevenLabs, and then return the
response as an ArrayBuffer. Unfortunately this means duplicating the code in the server-side
and client-side vs. the tRPC implementation. So at least we recycle the input structures.
*/
const handler = async (req: Request) => {
try {
// construct the upstream request
const {
elevenKey, text, voiceId, nonEnglish,
streaming, streamOptimization,
} = speechInputSchema.parse(await req.json());
const path = `/v1/text-to-speech/${elevenlabsVoiceId(voiceId)}` + (streaming ? `/stream?optimize_streaming_latency=${streamOptimization || 1}` : '');
const { headers, url } = elevenlabsAccess(elevenKey, path);
const body: ElevenlabsWire.TTSRequest = {
text: text,
...(nonEnglish && { model_id: 'eleven_multilingual_v1' }),
};
// elevenlabs POST
const upstreamResponse: Response = await serverFetchOrThrow(url, 'POST', headers, body);
// NOTE: this is disabled, as we pass-through what we get upstream for speed, as it is not worthy
// to wait for the entire audio to be downloaded before we send it to the client
// if (!streaming) {
// const audioArrayBuffer = await upstreamResponse.arrayBuffer();
// return new NextResponse(audioArrayBuffer, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
// }
// stream the data to the client
const audioReadableStream = upstreamResponse.body || createEmptyReadableStream();
return new Response(audioReadableStream, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
} catch (error: any) {
const fetchOrVendorError = safeErrorString(error) + (error?.cause ? ' · ' + error.cause : '');
console.log(`api/elevenlabs/speech: fetch issue: ${fetchOrVendorError}`);
return new Response(`[Issue] elevenlabs: ${fetchOrVendorError}`, { status: 500 });
}
};
export const runtime = 'edge';
export { handler as POST };
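For context, a client would call this handler roughly as follows — a sketch that assumes the route is mounted at `/api/elevenlabs/speech` (as the log line above suggests) and that the body fields mirror `speechInputSchema`; it is not the project's actual client code:

```typescript
// Browser-side sketch: request TTS audio from the handler above and play it.
async function speak(text: string, elevenKey?: string, voiceId?: string): Promise<void> {
  const res = await fetch('/api/elevenlabs/speech', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ elevenKey, text, voiceId, nonEnglish: false, streaming: false }),
  });
  if (!res.ok) throw new Error(await res.text());
  // the handler returns an 'audio/mpeg' stream; hand it to an <audio> element
  const audio = new Audio(URL.createObjectURL(await res.blob()));
  await audio.play();
}
```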
-2
View File
@@ -1,2 +0,0 @@
export const runtime = 'edge';
export { llmStreamingRelayHandler as POST } from '~/modules/llms/server/llm.server.streaming';
-19
View File
@@ -1,19 +0,0 @@
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
import { appRouterEdge } from '~/server/api/trpc.router-edge';
import { createTRPCFetchContext } from '~/server/api/trpc.server';
const handlerEdgeRoutes = (req: Request) =>
fetchRequestHandler({
router: appRouterEdge,
endpoint: '/api/trpc-edge',
req,
createContext: createTRPCFetchContext,
onError:
process.env.NODE_ENV === 'development'
? ({ path, error }) => console.error(`❌ tRPC-edge failed on ${path ?? '<no-path>'}:`, error)
: undefined,
});
export const runtime = 'edge';
export { handlerEdgeRoutes as GET, handlerEdgeRoutes as POST };
-19
View File
@@ -1,19 +0,0 @@
import { fetchRequestHandler } from '@trpc/server/adapters/fetch';
import { appRouterNode } from '~/server/api/trpc.router-node';
import { createTRPCFetchContext } from '~/server/api/trpc.server';
const handlerNodeRoutes = (req: Request) =>
fetchRequestHandler({
router: appRouterNode,
endpoint: '/api/trpc-node',
req,
createContext: createTRPCFetchContext,
onError:
process.env.NODE_ENV === 'development'
? ({ path, error }) => console.error(`❌ tRPC-node failed on ${path ?? '<no-path>'}:`, error)
: undefined,
});
export const runtime = 'nodejs';
export { handlerNodeRoutes as GET, handlerNodeRoutes as POST };
-14
View File
@@ -1,14 +0,0 @@
# Very simple docker-compose file to run the app on http://localhost:3000 (or http://127.0.0.1:3000).
#
# For more examples, such as running big-AGI alongside a web browsing service, see the `docs/docker` folder.
version: '3.9'
services:
big-agi:
image: ghcr.io/enricoros/big-agi:latest
ports:
- "3000:3000"
env_file:
- .env
command: [ "next", "start", "-p", "3000" ]
+37
View File
@@ -0,0 +1,37 @@
### Authentication with NextAuth.js 🔐
To protect the web app owner from incurring unauthorized costs when deploying the app with a backend API
key (`OPENAI_API_KEY`), you can set up basic authentication using [NextAuth.js](https://next-auth.js.org/).
#### Configuration
Update your `.env` file or Environment Variables with the following variables:
```
# [Optional] Set the authentication type to "credential" to enable basic username/password authentication
AUTH_TYPE=credential
# [Required if AUTH_TYPE == credential] Define credentials for users - you can declare up to 100 users
AUTH_USER_0=your_username
AUTH_PASSWORD_0=your_password
AUTH_USER_1=...
AUTH_PASSWORD_1=...
...
# [Required if AUTH_TYPE == credential and *not in development mode*] See: https://next-auth.js.org/configuration/options#nextauth_url
NEXTAUTH_URL=https://example.com
# [Required if AUTH_TYPE == credential] See: https://next-auth.js.org/configuration/options#secret
NEXTAUTH_SECRET=your_nextauth_secret
```
You can add multiple users by incrementing the index, e.g., `AUTH_USER_1`, `AUTH_PASSWORD_1`, and so on. They do not
need to be contiguous.
#### Usage
Once you have set up basic authentication, users will be prompted to enter their credentials when accessing the app.
Only users with valid credentials will be able to use the app and make requests to the OpenAI API.
For more information on configuring and using NextAuth.js, refer to
the [official documentation](https://next-auth.js.org/).
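As a rough illustration, this is how such credentials could be wired into a NextAuth.js Credentials provider — a minimal sketch that follows NextAuth's documented v4 API and default file location, not necessarily this project's actual implementation:

```typescript
// pages/api/auth/[...nextauth].ts — illustrative sketch only
import NextAuth from 'next-auth';
import CredentialsProvider from 'next-auth/providers/credentials';

export default NextAuth({
  providers: [
    CredentialsProvider({
      name: 'Credentials',
      credentials: {
        username: { label: 'Username', type: 'text' },
        password: { label: 'Password', type: 'password' },
      },
      async authorize(credentials) {
        // check against the AUTH_USER_n / AUTH_PASSWORD_n pairs (0..99, possibly sparse)
        for (let i = 0; i < 100; i++) {
          const user = process.env[`AUTH_USER_${i}`];
          const pass = process.env[`AUTH_PASSWORD_${i}`];
          if (user && pass && credentials?.username === user && credentials?.password === pass)
            return { id: String(i), name: user };
        }
        return null; // reject the sign-in
      },
    }),
  ],
  secret: process.env.NEXTAUTH_SECRET,
});
```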
-190
View File
@@ -1,190 +0,0 @@
## Changelog
This is a high-level changelog that calls out some of the major features batched
by release.
- For the live roadmap, please see [the GitHub project](https://github.com/users/enricoros/projects/4/views/2)
### 1.13.0 - Feb 2024
- milestone: [1.13.0](https://github.com/enricoros/big-agi/milestone/13)
- work in progress: [big-AGI open roadmap](https://github.com/users/enricoros/projects/4/views/2), [help here](https://github.com/users/enricoros/projects/4/views/4)
## What's New in 1.13.0 · Feb 8, 2024 · Multi + Mind
https://github.com/enricoros/big-AGI/assets/32999/01732528-730e-41dc-adc7-511385686b13
- **Side-by-Side Split Windows**: multitask with parallel conversations. [#208](https://github.com/enricoros/big-AGI/issues/208)
- **Multi-Chat Mode**: message everyone, all at once. [#388](https://github.com/enricoros/big-AGI/issues/388)
- **Export tables as CSV** - big thanks to @aj47. [#392](https://github.com/enricoros/big-AGI/pull/392)
- **Adjustable Text Size**: enjoy denser chats. [#399](https://github.com/enricoros/big-AGI/issues/399)
- Dev2 Persona Technology Preview
- Better looking chats with improved spacing, fonts, and menus
- More: new video player, [LM Studio tutorial](https://github.com/enricoros/big-AGI/blob/main/docs/config-lmstudio.md), [MongoDB support](https://github.com/enricoros/big-AGI/blob/main/docs/config-database.md) (thanks @ranfysvalle02), and speedups
## What's New in 1.12.0 · Jan 26, 2024 · AGI Hotline
https://github.com/enricoros/big-AGI/assets/32999/95ceb03c-945d-4fdd-9a9f-3317beb54f3f
- **Voice Calls**: real-time voice call your personas out of the blue or in relation to a chat [#354](https://github.com/enricoros/big-AGI/issues/354)
- Support **OpenAI 0125** Models. [#364](https://github.com/enricoros/big-AGI/issues/364)
- Rename or Auto-Rename chats. [#222](https://github.com/enricoros/big-AGI/issues/222), [#360](https://github.com/enricoros/big-AGI/issues/360)
- More control over **Link Sharing** [#356](https://github.com/enricoros/big-AGI/issues/356)
- **Accessibility** to screen readers [#358](https://github.com/enricoros/big-AGI/issues/358)
- Export chats to Markdown [#337](https://github.com/enricoros/big-AGI/issues/337)
- Paste tables from Excel [#286](https://github.com/enricoros/big-AGI/issues/286)
- Ollama model updates and context window detection fixes [#309](https://github.com/enricoros/big-AGI/issues/309)
### What's New in 1.11.0 · Jan 16, 2024 · Singularity
https://github.com/enricoros/big-AGI/assets/1590910/a6b8e172-0726-4b03-a5e5-10cfcb110c68
- **Find chats**: search in titles and content, with frequency ranking. [#329](https://github.com/enricoros/big-AGI/issues/329)
- **Commands**: command auto-completion (type '/'). [#327](https://github.com/enricoros/big-AGI/issues/327)
- **[Together AI](https://www.together.ai/products#inference)** inference platform support (good speed and newer models). [#346](https://github.com/enricoros/big-AGI/issues/346)
- Persona Creator history, deletion, custom creation, fix llm API timeouts
- Enable adding up to five custom OpenAI-compatible endpoints
- Developer enhancements: new 'Actiles' framework
### What's New in 1.10.0 · Jan 6, 2024 · The Year of AGI
- **New UI**: for both desktop and mobile, sets the stage for future scale. [#201](https://github.com/enricoros/big-AGI/issues/201)
- **Conversation Folders**: enhanced conversation organization. [#321](https://github.com/enricoros/big-AGI/issues/321)
- **[LM Studio](https://lmstudio.ai/)** support and improved token management
- Resizable panes in split-screen conversations.
- Large performance optimizations
- Developer enhancements: new UI framework, updated documentation for proxy settings on browserless/docker
### What's New in 1.9.0 · Dec 28, 2023 · Creative Horizons
- **DALL·E 3 integration** for enhanced image generation. [#212](https://github.com/enricoros/big-AGI/issues/212)
- **Perfect scrolling mechanics** across devices. [#304](https://github.com/enricoros/big-AGI/issues/304)
- Persona creation now supports **text input**. [#287](https://github.com/enricoros/big-AGI/pull/287)
- Openrouter updates for better model management and rate limit handling
- Image drawing UX improvements
- Layout fix for Firefox users
- Developer enhancements: Text2Image subsystem, Optima layout, ScrollToBottom library, Panes library, and Llms subsystem updates.
### What's New in 1.8.0 · Dec 20, 2023 · To The Moon And Back
- **Google Gemini Support**: Use the newest Google models. [#275](https://github.com/enricoros/big-agi/issues/275)
- **Mistral Platform**: Mixtral and future models support. [#273](https://github.com/enricoros/big-agi/issues/273)
- **Diagram Instructions**. Thanks to @joriskalz! [#280](https://github.com/enricoros/big-agi/pull/280)
- Ollama Chats: Enhanced chatting experience. [#270](https://github.com/enricoros/big-agi/issues/270)
- Mac Shortcuts Fix: Improved UX on Mac
- **Single-Tab Mode**: Data integrity with single window. [#268](https://github.com/enricoros/big-agi/issues/268)
- **Updated Models**: Latest Ollama (v0.1.17) and OpenRouter models
- Official Downloads: Easy access to the latest big-AGI on [big-AGI.com](https://big-agi.com)
- For developers: [troubleshot networking](https://github.com/enricoros/big-AGI/issues/276#issuecomment-1858591483), fixed Vercel deployment, cleaned up the LLMs/Streaming framework
### What's New in 1.7.0 · Dec 11, 2023 · Attachment Theory
- **Attachments System Overhaul**: Drag, paste, link, snap, text, images, PDFs and more. [#251](https://github.com/enricoros/big-agi/issues/251)
- **Desktop Webcam Capture**: Image capture now available as Labs feature. [#253](https://github.com/enricoros/big-agi/issues/253)
- **Independent Browsing**: Full browsing support with Browserless. [Learn More](https://github.com/enricoros/big-agi/blob/main/docs/config-browse.md)
- **Overheat LLMs**: Push the creativity with higher LLM temperatures. [#256](https://github.com/enricoros/big-agi/issues/256)
- **Model Options Shortcut**: Quick adjust with `Ctrl+Shift+O`
- Optimized Voice Input and Performance
- Latest Ollama and Oobabooga models
- For developers: **Password Protection**: HTTP Basic Auth. [Learn How](https://github.com/enricoros/big-agi/blob/main/docs/deploy-authentication.md)
### What's New in 1.6.0 - Nov 28, 2023 · Surf's Up
- **Web Browsing**: Download web pages within chats - [browsing guide](https://github.com/enricoros/big-agi/blob/main/docs/config-browse.md)
- **Branching Discussions**: Create new conversations from any message
- **Keyboard Navigation**: Swift chat navigation with new shortcuts (e.g. ctrl+alt+left/right)
- **Performance Boost**: Faster rendering for a smoother experience
- **UI Enhancements**: Refined interface based on user feedback
- **New Features**: Anthropic Claude 2.1, `/help` command, and Flattener tool
- **For Developers**: Code quality upgrades and snackbar notifications
### What's New in 1.5.0 - Nov 19, 2023 · Loaded
- **Continued Voice**: Engage with hands-free interaction for a seamless experience
- **Visualization Tool**: Create data representations with our new visualization capabilities
- **Ollama Local Models**: Leverage local models support with our comprehensive guide
- **Text Tools**: Enjoy tools including highlight differences to refine your content
- **Mermaid Diagramming**: Render complex diagrams with our Mermaid language support
- **OpenAI 1106 Chat Models**: Experience the cutting-edge capabilities of the latest OpenAI models
- **SDXL Support**: Enhance your image generation with SDXL support for Prodia
- **Cloudflare OpenAI API Gateway**: Integrate with Cloudflare for a robust API gateway
- **Helicone for Anthropic**: Utilize Helicone's tools for Anthropic models
For Developers:
- Runtime Server-Side configuration: https://github.com/enricoros/big-agi/issues/189. Env vars are
not required to be set at build time anymore. The frontend will roundtrip to the backend at the
first request to get the configuration. See
https://github.com/enricoros/big-agi/blob/main/src/modules/backend/backend.router.ts.
- CloudFlare developers: please change the deployment command to
`rm app/api/trpc-node/[trpc]/route.ts && npx @cloudflare/next-on-pages@1`,
as we transitioned to the App router in NextJS 14. The documentation in
[docs/deploy-cloudflare.md](../docs/deploy-cloudflare.md) is updated
### 1.4.0: Sept/Oct: scale OUT
- **Expanded Model Support**: Azure and [OpenRouter](https://openrouter.ai/docs#models) models, including gpt-4-32k
- **Share and clone** conversations with public links
- Removed the 20 chats hard limit ([Ashesh3](https://github.com/enricoros/big-agi/pull/158))
- Latex Rendering
- Augmented Chat modes (Labs)
### July/Aug: More Better Faster
- **Camera OCR** - real-world AI - take a picture of a text, and chat with it
- **Anthropic models** support, e.g. Claude
- **Backup/Restore** - save chats, and restore them later
- **[Local model support with Oobabooga server](../docs/config-local-oobabooga)** - run your own LLMs!
- **Flatten conversations** - conversations summarizer with 4 modes
- **Fork conversations** - create a new chat, to try with different endings
- New commands: /s to add a System message, and /a for an Assistant message
- New Chat modes: Write-only - just appends the message, without assistant response
- Fix STOP generation - in sync with the Vercel team to fix a long-standing NextJS issue
- Fixes on the HTML block - particularly useful to see error pages
### June: scale UP
- **[New OpenAI Models](https://openai.com/blog/function-calling-and-other-api-updates) support** - 0613 models, including 16k and 32k
- **Cleaner UI** - with rationalized Settings, Modals, and Configurators
- **Dynamic Models Configurator** - easy connection with different model vendors
- **Multiple Model Vendors Support** framework to support many LLM vendors
- **Per-model Options** (temperature, tokens, etc.) for fine-tuning AI behavior to your needs
- Support for GPT-4-32k
- Improved Dialogs and Messages
- Much Enhanced DX: TRPC integration, modularization, pluggable UI, etc
### April / May: more #big-agi-energy
- **[Google Search](../docs/pixels/feature_react_google.png)** active in ReAct - add your keys to Settings > Google
Search
- **[Reason+Act](../docs/pixels/feature_react_turn_on.png)** preview feature - activate with 2-taps on the 'Chat' button
- **[Image Generation](../docs/pixels/feature_imagine_command.png)** using Prodia (BYO Keys) - /imagine - or menu option
- **[Voice Synthesis](../docs/pixels/feature_voice_1.png)** 📣 with ElevenLabs, including selection of custom voices
- **[Precise Token Counter](../docs/pixels/feature_token_counter.png)** 📈 extra-useful to pack the context window
- **[Install Mobile APP](../docs/pixels/feature_pwa.png)** 📲 looks like native (@harlanlewis)
- **[UI language](../docs/pixels/feature_language.png)** with auto-detect, and future app language! (@tbodyston)
- **PDF Summarization** 🧩🤯 - ask questions to a PDF! (@fredliubojin)
- **Code Execution: [Codepen](https://codepen.io/)/[Replit](https://replit.com/)** 💻 (@harlanlewis)
- **[SVG Drawing](../docs/pixels/feature_svg_drawing.png)** - draw with AI 🎨
- Chats: multiple chats, AI titles, Import/Export, Selection mode
- Rendering: Markdown, SVG, improved Code blocks
- Integrations: OpenAI organization ID
- [Cloudflare deployment instructions](../docs/deploy-cloudflare.md),
[awesome-agi](https://github.com/enricoros/awesome-agi)
- [Typing Avatars](../docs/pixels/gif_typing_040123.gif) ⌨️
<!-- p><a href="../docs/pixels/gif_typing_040123.gif"><img src="../docs/pixels/gif_typing_040123.gif" width='700' alt="New Typing Avatars"/></a></p -->
### March: first release
- **[AI Personas](../docs/pixels/feature_purpose_two.png)** - including Code, Science, Corporate, and Chat 🎭
- **Privacy**: user-owned API keys 🔑 and localStorage 🛡️
- **Context** - Attach or [Drag & Drop files](../docs/pixels/feature_drop_target.png) to add them to the prompt 📁
- **Syntax highlighting** - for multiple languages 🌈
- **Code Execution: Sandpack** -
  [now on branch](https://github.com/enricoros/big-agi/commit/f678a0d463d5e9cf0733f577e11bd612b7902d89) `variant-code-execution`
- Chat with GPT-4 and 3.5 Turbo 🧠💨
- Real-time streaming of AI responses ⚡
- **Voice Input** 🎙️ - works great on Chrome / Windows
- Integration: **[Paste.gg](../docs/pixels/feature_paste_gg.png)** integration for chat sharing 📥
- Integration: **[Helicone](https://www.helicone.ai/)** integration for API observability 📊
- 🌙 Dark mode - Wide mode ⛶
-87
View File
@@ -1,87 +0,0 @@
# Configuring Azure OpenAI Service with `big-AGI`
The entire procedure takes about 5 minutes and involves creating an Azure account,
setting up the Azure OpenAI service, deploying models, and configuring `big-AGI`
to access these models.
Please note that Azure operates on a 'pay-as-you-go' pricing model and requires
credit card information tied to a 'subscription' to the Azure service.
## Configuring `big-AGI`
If you have an `API Endpoint` and `API Key`, you can configure big-AGI as follows:
1. Launch the `big-AGI` application
2. Go to the **Models** settings
3. Add a Vendor and select **Azure OpenAI**
- Enter the Endpoint (e.g., 'https://your-openai-api-1234.openai.azure.com/')
- Enter the API Key (e.g., 'fd5...........................ba')
The deployed models are now available in the application. If you don't have a configured
Azure OpenAI service instance, continue with the next section.
## Setting Up Azure
### Step 1: Azure Account & Subscription
1. Create an account on [azure.microsoft.com](https://azure.microsoft.com/en-us/)
2. Go to the [Azure Portal](https://portal.azure.com/)
3. Click on **Create a resource** in the top left corner
4. Search for **Subscription** and select **[Create Subscription](https://portal.azure.com/#create/Microsoft.Subscription)**
- Fill in the required fields and click on **Create**
- Note down the **Subscription ID** (e.g., `12345678-1234-1234-1234-123456789012`)
### Step 2: Apply for Azure OpenAI Service
We'll now be creating "OpenAI"-specific resources on Azure. This requires applying for access,
and acceptance should be quick (even as low as minutes).
1. Visit [Azure OpenAI Service](https://aka.ms/azure-openai)
2. Click on **Apply for access**
- Fill in the required fields (including the subscription ID) and click on **Apply**
Once your application is accepted, you can create OpenAI resources on Azure.
### Step 3: Create Azure OpenAI Resource
For more information, see [Azure: Create and deploy OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)
1. Click on **Create a resource** in the top left corner
2. Search for **OpenAI** and select **[Create OpenAI](https://portal.azure.com/#create/Microsoft.CognitiveServicesOpenAI)**
3. Fill in the necessary fields on the **Create OpenAI** page
![Creating an OpenAI service](pixels/config-azure-openai-create.png)
- Select the subscription
- Select a resource group or create a new one
- Select the region. Note that the region determines the available models.
> For instance, **Canada East** offers GPT-4-32k models. For the full list, see [GPT-4 models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models)
- Name the service (e.g., `your-openai-api-1234`)
- Select a pricing tier (e.g., `S0` for standard)
- Select: "All networks, including the internet, can access this resource."
- Click on **Review + create** and then **Create**
After creating the resource, you can access the API Keys and Endpoints. At any point, you can go to
the OpenAI Service instance page to get this information.
- Click on **Go to resource**
- Click on **Develop**
- Copy the `Endpoint`, called "Language API", e.g. 'https://your-openai-api-1234.openai.azure.com/'
- Copy `KEY 1`
### Step 4: Deploy Models
By default, Azure OpenAI resource instances don't have models available. You need to deploy the models you want to use.
1. Click on **Model Deployments > Manage Deployments**
2. Click on **+Create New Deployment**
![Deploying a model](pixels/config-azure-openai-deploy.png)
- Select the model you want to deploy
- Optionally select a version
- Name the model, e.g., `gpt4-32k-0613`
Repeat as necessary for each model you want to deploy.
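To confirm a deployment is reachable before configuring `big-AGI`, you can call the Azure OpenAI REST endpoint directly — a minimal sketch where the deployment name and the `api-version` value are assumptions to adapt to your resource:

```typescript
// Minimal connectivity check against an Azure OpenAI deployment (illustrative sketch).
const endpoint = 'https://your-openai-api-1234.openai.azure.com'; // the "Language API" endpoint
const deployment = 'gpt4-32k-0613';                               // the deployment name chosen above
const apiVersion = '2023-05-15';                                  // assumed; use a version your resource supports

async function probeAzureOpenAI(apiKey: string): Promise<void> {
  const res = await fetch(
    `${endpoint}/openai/deployments/${deployment}/chat/completions?api-version=${apiVersion}`,
    {
      method: 'POST',
      headers: { 'api-key': apiKey, 'Content-Type': 'application/json' },
      body: JSON.stringify({ messages: [{ role: 'user', content: 'Hello' }], max_tokens: 16 }),
    },
  );
  console.log(res.status, await res.text());
}
```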
## Resources
- [Azure OpenAI Service Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/)
- [Guide: Create an Azure OpenAI Resource](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal)
- [Azure OpenAI Models](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models)
-111
View File
@@ -1,111 +0,0 @@
# Browse Functionality in big-AGI 🌐
Allows users to load web pages across various components of `big-AGI`. This feature is supported by Puppeteer-based
browsing services, which are the most common way to render web pages in a headless environment.
Once configured, the Browsing service provides this functionality:
- **Paste a URL**: Simply paste/drag a URL into the chat, and `big-AGI` will load and attach the page (very effective)
- **Use /browse**: Type `/browse [URL]` in the chat to command `big-AGI` to load the specified web page
- **ReAct**: ReAct will automatically use the `loadURL()` function whenever a URL is encountered
First of all, you need to procure a Puppeteer web browsing service endpoint. `big-AGI` supports services like:
| Service | Working | Type | Location | Special Features |
|--------------------------------------------------------------------------------------|---------|-------------|----------------|---------------------------------------------|
| [BrightData Scraping Browser](https://brightdata.com/products/scraping-browser) | Yes | Proprietary | Cloud | Advanced scraping tools, global IP pool |
| [Cloudflare Browser Rendering](https://developers.cloudflare.com/browser-rendering/) | ? | Proprietary | Cloud | Integrated CDN, optimized browser rendering |
| ⬇️ [Browserless 2.0](#-browserless-20) | Okay | OpenSource | Local (Docker) | Parallelism, debug viewer, advanced APIs |
| ⬇️ [Your Chrome Browser (ALPHA)](#-your-own-chrome-browser) | Alpha | Proprietary | Local (Chrome) | Personal, experimental use (ALPHA!) |
| other Puppeteer-based WSS Services | ? | Varied | Cloud/Local | Service-specific features |
## Configuration
1. **Procure an Endpoint**
- Ensure that your browsing service is running (remote or local) and has a WebSocket endpoint available
- Write down the address: `wss://${auth}@{some host}:{port}`, or ws:// for local services on your machine
2. **Configure `big-AGI`**
- navigate to **Preferences** > **Tools** > **Browse**
- Enter the 'wss://...' connection string provided by your browsing service
3. **Enable Features**: Choose which browse-related features you want to enable:
- **Attach URLs**: Automatically load and attach a page when pasting a URL into the composer
- **/browse Command**: Use the `/browse` command in the chat to load a web page
- **ReAct**: Enable the `loadURL()` function in ReAct for advanced interactions
### 🌐 Browserless 2.0
[Browserless 2.0](https://github.com/browserless/browserless) is a Docker-based service that provides a headless
browsing experience compatible with `big-AGI`. An open-source solution that simplifies web automation tasks,
in a scalable manner.
Launch Browserless with:
```bash
docker run -p 9222:3000 browserless/chrome:latest
```
Now you can use the following connection string in `big-AGI`: `ws://127.0.0.1:9222`.
You can also browse to [http://127.0.0.1:9222](http://127.0.0.1:9222) to see the Browserless debug viewer
and configure some options.
The chat agent won't be able to access websites if the Browserless container does not have direct Internet access. You can resolve this by defining an Internet proxy for the running container, and then use an environment file in a `docker-compose.yaml`:
```
browserless:
image: browserless/chrome:latest
env_file:
- .env
ports:
- "9222:3000" # Map host's port 9222 to container's port 3000
environment:
- MAX_CONCURRENT_SESSIONS=10
```
You can then add the proxy lines to your `.env` file.
```
https_proxy=http://PROXY-IP:PROXY-PORT
http_proxy=http://PROXY-IP:PROXY-PORT
```
The same can be defined as a one-liner with Docker:
`docker run --env https_proxy=http://PROXY-IP:PROXY-PORT --env http_proxy=http://PROXY-IP:PROXY-PORT -p 9222:3000 browserless/chrome:latest`
Note: if you are using `docker-compose`, please see the
[docker/docker-compose-browserless.yaml](docker/docker-compose-browserless.yaml) file for an example
on how to run `big-AGI` and Browserless simultaneously in a single application.
### 🌐 Your own Chrome browser
***EXPERIMENTAL - UNTESTED*** - You can use your own Chrome browser as a browsing service, by configuring it to expose
a WebSocket endpoint.
- close all the Chrome instances (on Windows, check the Task Manager if still running)
- start Chrome with the following command line options (on Windows, you can edit the shortcut properties):
- `--remote-debugging-port=9222`
- go to http://localhost:9222/json/version and copy the `webSocketDebuggerUrl` value
- it should be something like: `ws://localhost:9222/...`
- paste the value into the Endpoint configuration (see point 2 in the configuration)
### Server-Side Configuration
You can set the Puppeteer WebSocket endpoint (`PUPPETEER_WSS_ENDPOINT`) in the deployment before running it.
This is useful for self-hosted instances or when you want to pre-configure the endpoint for all users, and will
allow you to skip points 2 and 3 above.
Always deploy your own user authentication, authorization, and security solution. For this feature, the tRPC
route that provides the browsing service should be secured with authentication and authorization,
to prevent unauthorized access to the browsing service.
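For reference, this is roughly what a Puppeteer-compatible client does with such an endpoint — a minimal sketch using the public `puppeteer-core` API, with a placeholder endpoint value:

```typescript
// Connect to a local/remote browsing service over WebSocket and fetch a page (illustrative sketch).
import puppeteer from 'puppeteer-core';

async function loadPage(url: string): Promise<string> {
  const browser = await puppeteer.connect({
    browserWSEndpoint: process.env.PUPPETEER_WSS_ENDPOINT || 'ws://127.0.0.1:9222',
  });
  try {
    const page = await browser.newPage();
    await page.goto(url, { waitUntil: 'networkidle2', timeout: 30_000 });
    return await page.content(); // HTML of the rendered page
  } finally {
    await browser.disconnect(); // leave the remote browser running for other sessions
  }
}
```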
## Support
If you encounter any issues or have questions about configuring the browse functionality, join our community on Discord for support and discussions.
[![Official Discord](https://discordapp.com/api/guilds/1098796266906980422/widget.png?style=banner2)](https://discord.gg/MkH4qj2Jp9)
---
Enjoy the enhanced browsing experience within `big-AGI` and explore the web without ever leaving your chat!
-65
View File
@@ -1,65 +0,0 @@
**Connecting Your Database for Enhanced Features:**
This guide outlines the database options and setup steps for enabling features like Chat Link Sharing in your application.
### Choose Your Database:
**1. Serverless Postgres (default):**
- Available on Vercel, Neon, and other platforms.
- Less feature-rich but a suitable option depending on your needs.
- **Connection String:** Replace placeholders with your Postgres credentials.
- `postgres://USER:PASS@SOMEHOST.postgres.vercel-storage.com/SOMEDB?pgbouncer=true&connect_timeout=15`
**2. MongoDB Atlas (alternative):**
- **Highly Recommended:** More than a database, it's a data platform. MongoDB Atlas is a robust cloud-based platform that offers scalability, security, and a suite of developer tools. No need for a separate vector database, you can query your vector embeddings right within your operational database!
- **Additional Features:** MongoDB Atlas is packed with unique features designed to streamline the development process such as: Atlas App Services, Atlas search (with vector search), Atlas charts, Data Federation, and more.
- **Connection String:** Replace placeholders with your Atlas credentials.
- `mongodb://USER:PASS@CLUSTER-NAME.mongodb.net/DATABASE-NAME?retryWrites=true&w=majority`
### Environment Variables:
#### Postgres:
| Variable | Example / Description |
|--------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `POSTGRES_PRISMA_URL` | `postgres://USER:PASS@SOMEHOST.postgres.vercel-storage.com/SOMEDB?pgbouncer=true&connect_timeout=15` |
| `POSTGRES_URL_NON_POOLING` (optional) | URL for the Postgres database without pooling (specific use cases) |
#### MongoDB:
| Variable | Example / Description |
|--------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `MDB_URI` | `mongodb://USER:PASS@CLUSTER-NAME.mongodb.net/DATABASE-NAME?retryWrites=true&w=majority` |
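For example, a minimal `.env` sketch (placeholder credentials; pick the variable that matches your database):

```bash
# Postgres (Prisma)
POSTGRES_PRISMA_URL=postgres://USER:PASS@SOMEHOST.postgres.vercel-storage.com/SOMEDB?pgbouncer=true&connect_timeout=15

# MongoDB Atlas (alternative)
# MDB_URI=mongodb://USER:PASS@CLUSTER-NAME.mongodb.net/DATABASE-NAME?retryWrites=true&w=majority
```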
### MongoDB Atlas + Prisma
When using MongoDB Atlas, you'll need to make the changes below to the `schema.prisma` file:
```prisma
...
datasource db {
provider = "mongodb"
url = env("MDB_URI")
}
//
// Storage of Linked Data
//
model LinkStorage {
id String @id @default(uuid()) @map("_id")
// ...rest of file
```
### Initial Setup Steps:
1. **Run `npx prisma db push`:** Creates or updates the database schema (run once after connecting), for example:
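A short sketch, assuming the database connection string is already set in your environment:

```bash
npx prisma generate   # regenerate the Prisma client
npx prisma db push    # create or update the database schema
```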
### Additional Resources:
- Prisma documentation: [https://www.prisma.io/docs/](https://www.prisma.io/docs/)
- MongoDB Atlas: [https://www.mongodb.com/atlas/database](https://www.mongodb.com/atlas/database)
- Atlas App Services: [https://www.mongodb.com/docs/atlas/app-services/](https://www.mongodb.com/docs/atlas/app-services/)
- Atlas vector search: [https://www.mongodb.com/products/platform/atlas-vector-search/](https://www.mongodb.com/products/platform/atlas-vector-search)
- Atlas Data Federation: [https://www.mongodb.com/products/platform/atlas-data-federation](https://www.mongodb.com/products/platform/atlas-data-federation)
-51
View File
@@ -1,51 +0,0 @@
# Integrating LM Studio with big-AGI
Quickly set up LM Studio with big-AGI to run local and open LLMs on your computer for enhanced privacy and control over AI interactions.
## Video Tutorial
For a visual step-by-step guide, watch our [YouTube tutorial](https://www.youtube.com/watch?v=MqXzxVokMDk).
[![Running big-AGI locally with LM Studio YouTube Tutorial](http://img.youtube.com/vi/MqXzxVokMDk/0.jpg)](http://www.youtube.com/watch?v=MqXzxVokMDk "Running big-AGI locally with LM Studio")
## Quick Setup Guide
### Installing big-AGI
Clone and set up big-AGI:
```bash
git clone https://github.com/enricoros/big-agi.git && cd big-agi
npm install # Or: yarn install
npm run dev # Or: yarn dev
# If missing dependencies:
npm install @mui/material # Or: yarn add @mui/material
```
### Configuring LM Studio
Ensure LM Studio is running (default: [http://localhost:1234](http://localhost:1234)).
Check the URL and modify if different.
1. Download local models in LM Studio
2. Start the LM Studio server
3. Optionally, check the server logs
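To quickly verify that the LM Studio server is reachable (a sketch, assuming the default port 1234 and that LM Studio's OpenAI-compatible server has been started):

```bash
# Should return a JSON list of the models you downloaded in LM Studio
curl -s http://localhost:1234/v1/models
```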
### Integration in big-AGI
1. In big-AGI, navigate to **Models** > **Add** > **LM Studio**
2. Enter the API URL: `http://localhost:1234` (modify if different)
3. Refresh by clicking on the `Models` button to load models from LM Studio
## Troubleshooting
- **Missing @mui/material**: Execute `npm install @mui/material` or `yarn add @mui/material`
- **Connection Issues**: Check LM Studio's URL and ensure it's operational
## Further Assistance
Advanced configurations and more:
- big-AGI Community: [Discord](https://discord.gg/MkH4qj2Jp9)
- LM Studio: [LM Studio home page](https://lmstudio.ai/)
-34
View File
@@ -1,34 +0,0 @@
# Local LLM integration with `localai`
Integrate local Large Language Models (LLMs) with [LocalAI](https://localai.io).
_Last updated Nov 7, 2023_
## Instructions
### LocalAI installation and configuration
Follow the guide at: https://localai.io/basics/getting_started/
For instance with [Use luna-ai-llama2 with docker compose](https://localai.io/basics/getting_started/#example-use-luna-ai-llama2-model-with-docker-compose):
- clone LocalAI
- get the model
- copy the prompt template
- start docker
- -> the server will be listening on `localhost:8080`
- verify it works by going to [http://localhost:8080/v1/models](http://localhost:8080/v1/models) on
your browser and seeing listed the model you downloaded
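Or from the command line (the same check as the browser, assuming the default port):

```bash
# Should list the model you downloaded, e.g. luna-ai-llama2
curl -s http://localhost:8080/v1/models
```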
### Integrating LocalAI with big-AGI
- Go to Models > Add a model source of type: **LocalAI**
- Enter the address: `http://localhost:8080` (default)
- If running remotely, replace localhost with the IP of the machine. Make sure to use the **IP:Port** format
- Load the models
- Select model & Chat
> NOTE: LocalAI does not list details about the models. Every model is assumed to be
> capable of chatting, and with a context window of 4096 tokens.
> Please update the [src/modules/llms/transports/server/openai/models.data.ts](../src/modules/llms/server/openai/models.data.ts)
> file with the mapping information between LocalAI model IDs and names/descriptions/tokens, etc.
-61
View File
@@ -1,61 +0,0 @@
# Local LLM Integration with `text-web-ui` :llama:
Integrate local Large Language Models (LLMs) with
[oobabooga/text-generation-webui](https://github.com/oobabooga/text-generation-webui),
a specialized interface that includes a custom variant of the OpenAI API for a smooth integration process.
_Last updated on Dec 7, 2023_
### Components
The implementation of local LLMs involves the following components:
* **text-generation-webui**: A Python application with a Gradio web UI for operating Large Language Models.
* **Local Large Language Models "LLMs"**: Use large language models on your personal computer with consumer-grade GPUs or CPUs.
* **big-AGI**: An LLM UI that offers features such as Personas, OCR, Voice Support, Code Execution, AGI functions, and more.
## Instructions
This guide assumes that **big-AGI** is already installed on your system. Note that the text-generation-webui IP address must be accessible from the server running **big-AGI**.
### Text-web-ui Installation & Configuration:
1. Install [text-generation-webui](https://github.com/oobabooga/text-generation-webui#Installation):
- Follow the instructions on the official page (basically, clone the repo and run a script) [~10 minutes]
- Stop the Web UI as we need to modify the startup flags to enable the OpenAI API
2. Enable the **openai extension**
- Edit `CMD_FLAGS.txt`
- Make sure that `--listen --api` is present and uncommented
3. Restart text-generation-webui
- Double-click on "start"
- You should see something like:
```
2023-12-07 21:51:21 INFO:Loading the extension "openai"...
2023-12-07 21:51:21 INFO:OpenAI-compatible API URL:
http://0.0.0.0:5000
...
INFO: Uvicorn running on http://0.0.0.0:5000 (Press CTRL+C to quit)
Running on local URL: http://0.0.0.0:7860
```
- This shows that:
- The Web UI is running on port 7860: http://127.0.0.1:7860
- **The OpenAI API is running on port 5000: http://127.0.0.1:5000**
4. Load your first model
- Open the text-generation-webui at [127.0.0.1:7860](http://127.0.0.1:7860/)
- Switch to the **Model** tab
- Download, for instance, `TheBloke/Llama-2-7B-Chat-GPTQ`
- Select the model once it's loaded
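Optionally, you can confirm that the OpenAI-compatible API is up before switching to big-AGI (a sketch, assuming the default port 5000 seen in the logs above):

```bash
# Should return the models known to text-generation-webui
curl -s http://127.0.0.1:5000/v1/models
```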
### Integrating text-web-ui with big-AGI:
1. Integrating Text-Generation-WebUI with big-AGI:
- Go to Models > Add a model source of type: **Oobabooga**
- Enter the address: `http://127.0.0.1:5000`
- If running remotely, replace 127.0.0.1 with the IP of the machine. Make sure to use the **IP:Port** format
- Load the models
- The active model must be selected and LOADED on the text-generation-webui as it doesn't support model switching or parallel requests.
- Select model & Chat
![config-oobabooga-0.png](pixels/config-oobabooga-0.png)
Enjoy the privacy and flexibility of local LLMs with `big-AGI` and `text-generation-webui`!
-98
View File
@@ -1,98 +0,0 @@
# `Ollama` x `big-AGI` :llama:
This guide helps you connect [Ollama](https://ollama.ai) [models](https://ollama.ai/library) to
[big-AGI](https://big-agi.com) for a professional AI/AGI operation and a good UI/Conversational
experience. The integration brings the popular big-AGI features to Ollama, including: voice chats,
editing tools, models switching, personas, and more.
_Last updated Dec 16, 2023_
![config-local-ollama-0-example.png](pixels/config-ollama-0-example.png)
## Quick Integration Guide
1. **Ensure Ollama API Server is Running**: Follow the official instructions to get Ollama up and running on your machine
- For detailed instructions on setting up the Ollama API server, please refer to the
[Ollama download page](https://ollama.ai/download) and [instructions for linux](https://github.com/jmorganca/ollama/blob/main/docs/linux.md).
2. **Add Ollama as a Model Source**: In `big-AGI`, navigate to the **Models** section, select **Add a model source**, and choose **Ollama**
3. **Enter Ollama Host URL**: Provide the Ollama Host URL where the API server is accessible (e.g., `http://localhost:11434`)
4. **Refresh Model List**: Once connected, refresh the list of available models to include the Ollama models
> Optional: use the Ollama Admin interface to see which models are available and 'Pull' them onto your local machine. Note
that this operation will likely time out (due to the Edge Functions timeout on the big-AGI server) while pulling, and
you'll have to press the 'Pull' button again until a green message appears.
5. **Chat with Ollama models**: select an Ollama model and begin chatting with AI personas
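As a quick check that the Ollama API server is reachable from where big-AGI runs (a sketch, assuming the default host and port):

```bash
# Lists the models already pulled on the Ollama server
curl -s http://localhost:11434/api/tags
```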
**Visual Configuration Guide**:
* After adding the `Ollama` model vendor, entering the IP address of an Ollama server, and refreshing models:<br/>
<img src="pixels/config-ollama-1-models.png" alt="config-local-ollama-1-models.png" width="320">
* The `Ollama` admin panel, with the `Pull` button highlighted, after pulling the "Yi" model:<br/>
<img src="pixels/config-ollama-2-admin-pull.png" alt="config-local-ollama-2-admin-pull.png" width="320">
* You can now switch model/persona dynamically and text/voice chat with the models:<br/>
<img src="pixels/config-ollama-3-chat.png" alt="config-local-ollama-3-chat.png" width="320">
<br/>
### ⚠️ Network Troubleshooting
If you get errors about the server having trouble connecting with Ollama, please see
[this message](https://github.com/enricoros/big-AGI/issues/276#issuecomment-1858591483) on Issue #276.
And in brief, make sure the Ollama endpoint is accessible from the servers where you run big-AGI (which could
be localhost or cloud servers).
![Ollama Networking Chart](pixels/config-ollama-network.png)
<br/>
### Advanced: Model parameters
For users who wish to delve deeper into advanced settings, `big-AGI` offers additional configuration options, such
as the model temperature, maximum tokens, etc.
### Advanced: Ollama under a reverse proxy
You can elegantly expose your Ollama server to the internet (and thus make it easier to use from your server-side
big-AGI deployments) by exposing it on an http/https URL, such as: `https://yourdomain.com/ollama`
On Ubuntu Servers, you will need to install `nginx` and configure it to proxy requests to Ollama.
```bash
sudo apt update
sudo apt install nginx
sudo apt install certbot python3-certbot-nginx
sudo certbot --nginx -d yourdomain.com
```
Then, edit the nginx configuration file `/etc/nginx/sites-enabled/default` and add the following block:
```nginx
location /ollama/ {
proxy_pass http://localhost:11434/; # trailing slash: strips the /ollama/ prefix before forwarding to Ollama
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host $host;
proxy_cache_bypass $http_upgrade;
# Disable buffering for the streaming responses
proxy_buffering off;
}
```
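After reloading nginx, you can verify the proxy with a quick request (a sketch, assuming `yourdomain.com` points at this server and Ollama is running locally):

```bash
sudo nginx -t && sudo systemctl reload nginx
# Should return the same model list as querying Ollama directly
curl -s https://yourdomain.com/ollama/api/tags
```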
Reach out to our community if you need help with this.
<br/>
### Community and Support
Join our community to share your experiences, get help, and discuss best practices:
[![Official Discord](https://discordapp.com/api/guilds/1098796266906980422/widget.png?style=banner2)](https://discord.gg/MkH4qj2Jp9)
---
`big-AGI` is committed to providing a powerful, intuitive, and privacy-respecting AI experience.
We are excited for you to explore the possibilities with Ollama models. Happy creating!
-31
View File
@@ -1,31 +0,0 @@
# OpenRouter Configuration
[OpenRouter](https://openrouter.ai) is a standalone, premium service
that provides access to [exclusive AI models](https://openrouter.ai/docs#models)
such as GPT-4 32k, Claude, and more. These models are typically not available to the public.
This document details the process of integrating OpenRouter with big-AGI.
### 1. OpenRouter Account Setup and API Key Generation
1. Register for an OpenRouter account at [openrouter.ai](https://openrouter.ai) by clicking on Sign In > Continue with Google.
2. Top up your account (minimum $5) by navigating to [openrouter.ai/account](https://openrouter.ai/account) > Add Credits > Pay with Stripe.
3. Generate an API key at [openrouter.ai/keys](https://openrouter.ai/keys) > API Key > Generate API Key.
- **Remember to copy and securely store your API key** - the key will not be displayed again and will be in the format `sk-or-v1-...`.
- Keep the key confidential as it can be used to expend your credits.
### 2. Integrating OpenRouter with big-AGI
1. Launch big-AGI, and navigate to the AI **Models** settings.
2. Add a Vendor, and select **OpenRouter**.
![feature-openrouter-add.png](pixels/feature-openrouter-add.png)
3. Input the API key into the **OpenRouter API Key** field, and load the Models.
![feature-openrouter-configure.png](pixels/feature-openrouter-configure.png)
4. OpenAI GPT4-32k and other models will now be accessible and selectable in the application.
### Pricing
OpenRouter independently manages its service and pricing and is not affiliated with big-AGI.
For more detailed information, please visit [this page](https://openrouter.ai/docs#models).
Please note that running large models such as GPT-4 32k can be costly and may rapidly consume
credits - a single prompt may cost $1 or more, at the time of writing.
-45
View File
@@ -1,45 +0,0 @@
# Authentication
`big-AGI` does not come with built-in authentication. To secure your deployment, you can implement authentication
in one of the following ways:
1. Build `big-AGI` with support for ⬇️ [HTTP Authentication](#http-authentication)
2. Utilize user authentication features provided by your ⬇️ [cloud deployment platform](#cloud-deployments-authentication)
3. Develop a custom authentication solution
<br/>
### HTTP Authentication
[HTTP Basic Authentication](https://developer.mozilla.org/en-US/docs/Web/HTTP/Authentication) is a simple method
to secure your application.
To enable it in `big-AGI`, you **must manually build the application**:
- Build `big-AGI` with HTTP authentication enabled:
- Clone the repository
- Rename `middleware_BASIC_AUTH.ts` to `middleware.ts`
- Build: usual simple build procedure (e.g. [Deploy manually](../README.md#-deploy-manually) or [Deploying with Docker](deploy-docker.md))
- Configure the following [environment variables](environment-variables.md) before launching `big-AGI`:
```dotenv
HTTP_BASIC_AUTH_USERNAME=<your username>
HTTP_BASIC_AUTH_PASSWORD=<your password>
```
- Start the application 🔒
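To confirm that the protection is active (a sketch, assuming a local deployment on port 3000 and the credentials you set above):

```bash
# Without credentials: expect a 401 response
curl -I http://localhost:3000/
# With credentials: expect a 200 response
curl -I -u "<your username>:<your password>" http://localhost:3000/
```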
<br/>
### Cloud Deployments Authentication
> This approach allows you to enable authentication without rebuilding the application by using the features
> provided by your cloud platform to manage user accounts and access.
Many cloud deployment platforms offer built-in authentication mechanisms. Refer to the platform's documentation
for setup instructions:
1. [CloudFlare Access / Zero Trust](https://www.cloudflare.com/zero-trust/products/access/)
2. [Vercel Authentication](https://vercel.com/docs/security/deployment-protection/methods-to-protect-deployments/vercel-authentication)
3. [Vercel Password Protection](https://vercel.com/docs/security/deployment-protection/methods-to-protect-deployments/password-protection)
4. Let us know when you test more solutions (Heroku, AWS IAM, Google IAP, etc.)
+34 -47
View File
@@ -1,68 +1,55 @@
# Deploying a Next.js App on Cloudflare Pages
# Deploying Next.js App on Cloudflare Pages
> WARNING: Cloudflare Pages does not support traditional NodeJS runtimes, but only Edge Runtime functions.
>
> In this project we use Prisma connected to serverless Postgres, which at the moment cannot run on
> edge functions, so we cannot deploy this project on Cloudflare Pages.
>
> Workaround: Step 3.4. has been added below, to DELETE the NodeJS traditional runtime - which means that some
> parts of this application will not work.
> - [Side effects](https://github.com/enricoros/big-agi/blob/main/src/apps/chat/trade/server/trade.router.ts#L19):
> Sharing functionality to DB, and import from ChatGPT share, and post to Paste.GG will not work
> - See [Issue 174](https://github.com/enricoros/big-agi/issues/174).
>
> Longer term: follow [prisma/prisma: Support Edge Function deployments](https://github.com/prisma/prisma/issues/21394)
> and convert the Node runtime to Edge runtime once Prisma supports it.
Follow these steps to deploy your Next.js app on Cloudflare Pages. This guide is based on
the [official Cloudflare developer documentation](https://developers.cloudflare.com/pages/framework-guides/deploy-a-nextjs-site/),
with a few additional steps.
This guide provides steps to deploy your Next.js app on Cloudflare Pages.
It is based on the [official Cloudflare developer documentation](https://developers.cloudflare.com/pages/framework-guides/deploy-a-nextjs-site/),
with some additional steps.
## Step 1: Fork the Repository
## Step 1: Repository Forking
Fork the repository to your own GitHub account.
Fork the repository to your personal GitHub account.
## Step 2: Connect Cloudflare Pages to Your GitHub Account
## Step 2: Linking Cloudflare Pages to Your GitHub Account
1. Go to the Cloudflare Pages section and click the `Create a project` button.
2. Click `Connect To Git` and give Cloudflare Pages either All GitHub account Repo access or selected Repo access. We
recommend using selected Repo access and selecting the forked repo from step 1.
1. Navigate to the Cloudflare Pages section and click on the `Create a project` button.
2. Click `Connect To Git` and grant Cloudflare Pages access to either all GitHub account repositories or selected repositories.
We recommend using selected Repo access and selecting the forked repository from step 1.
## Step 3: Setup Build and Deployments
## Step 3: Configuring Build and Deployments
1. Once you select the forked GitHub repo, click the `Begin Setup` button.
2. On this page, set your `Project name`, `Production branch` (e.g., main), and your Build settings.
3. Select `Next.js` from the `Framework preset` dropdown menu.
4. Leave the preset filled Build command and Build output directory as preset defaults.
5. Set `Environmental variables` (advanced) on this page to configure some variables as follows:
1. After selecting the forked GitHub repository, click the **Begin Setup** button
2. On this page, set your **Project name**, **Production branch** (e.g., main), and your Build settings
3. Choose `Next.js` from the **Framework preset** dropdown menu
4. Set a custom **Build Command**:
- `rm app/api/trpc-node/[trpc]/route.ts && npx @cloudflare/next-on-pages@1`
- see the tradeoffs for this deletion on the notice at the top
5. Keep the **Build output directory** as default
6. Click the **Save and Deploy** button
| Variable | Value |
|---------------------------|---------|
| `GO_VERSION` | `1.16` |
| `NEXT_TELEMETRY_DISABLED` | `1` |
| `NODE_VERSION` | `17` |
| `PHP_VERSION` | `7.4` |
| `PYTHON_VERSION` | `3.7` |
| `RUBY_VERSION` | `2.7.1` |
## Step 4: Monitoring the Deployment Process
6. Click the `Save and Deploy` button.
Observe the process as it initializes your build environment, clones the GitHub repository, builds the application, and deploys it
to the Cloudflare Network. Once complete, proceed to the project you created.
## Step 4: Monitor the Deployment Process
## Step 5: Required: Set the `nodejs_compat` compatibility flag
Watch the process run to initialize your build environment, clone the GitHub repo, build the application, and deploy to
the Cloudflare Network. Once that is done, proceed to the project you created.
1. Navigate to the [Settings > Functions](https://dash.cloudflare.com/?to=/:account/pages/view/:pages-project/settings/functions) page of your newly created project
2. Scroll to `Compatibility flags` and enter "`nodejs_compat`" for both **Production** and **Preview** environments.
It should look like this: ![](pixels/config-deploy-cloudflare-compat2.png)
3. Re-deploy your project for the new flags to take effect
## Step 6: (Optional) Custom Domain Configuration
## Step 5: Set up a Custom Domain
Use the `Custom domains` tab to set up your domain via CNAME.
## Step 7: (Optional) Access Policy and Web Analytics Configuration
## Step 6: Configure Access Policy and Web Analytics
Navigate to the `Settings` page and enable the following settings:
Go to the `Settings` page and enable the following settings:
1. Access Policy: Restrict [preview deployments](https://developers.cloudflare.com/pages/platform/preview-deployments/)
to members of your Cloudflare account via one-time pin and restrict primary `*.YOURPROJECT.pages.dev` domain.
Refer to [Cloudflare Pages known issues](https://developers.cloudflare.com/pages/platform/known-issues/#enabling-access-on-your-pagesdev-domain)
for more details.
See [Cloudflare Pages known issues](https://developers.cloudflare.com/pages/platform/known-issues/#enabling-access-on-your-pagesdev-domain)
for more information.
2. Enable Web Analytics.
Congratulations! You have successfully deployed your Next.js app on Cloudflare Pages.
Now you have successfully deployed your Next.js app on Cloudflare Pages.
+15 -66
View File
@@ -1,77 +1,26 @@
# Deploying `big-AGI` with Docker
# Deploy `big-AGI` with Docker 🐳
Utilize Docker containers to deploy the big-AGI application for an efficient and automated deployment process.
Docker ensures faster development cycles, easier collaboration, and seamless environment management.
Deploy the big-AGI application using Docker containers for a consistent, efficient, and automated deployment process. Enjoy faster development cycles, easier collaboration, and seamless environment management. 🚀
## Build and run your container 🔧
Docker is a platform for developing, packaging, and deploying applications as lightweight containers, ensuring consistent behavior across environments.
1. **Clone big-AGI**
```bash
git clone https://github.com/enricoros/big-agi.git
cd big-agi
```
2. **Build the Docker Image**: Build a local docker image from the provided Dockerfile:
```bash
docker build -t big-agi .
```
3. **Run the Docker Container**: start a Docker container from the newly built image,
and expose its http port 3000 to your `localhost:3000` using:
```bash
docker run -d -p 3000:3000 big-agi
```
4. Browse to [http://localhost:3000](http://localhost:3000)
## `big-AGI` Docker Components
<br/>
The big-AGI repository includes a Dockerfile and a GitHub Actions workflow for building and publishing a Docker image of the application.
## Run Official Containers 📦
### Dockerfile
`big-AGI` is pre-built from source code and published as a Docker image on the GitHub Container Registry (ghcr).
The build process is transparent and happens via GitHub Actions, as described in the
[`.github/workflows/docker-image.yml`](../.github/workflows/docker-image.yml) file.
The [`Dockerfile`](../Dockerfile) sets up a Node.js environment, installs dependencies, and creates a production-ready version of the application.
### Official Images: [ghcr.io/enricoros/big-agi](https://github.com/enricoros/big-agi/pkgs/container/big-agi)
### GitHub Actions Workflow
#### Run using *docker* 🚀
The [`.github/workflows/docker-image.yml`](../.github/workflows/docker-image.yml) file automates building and publishing the Docker image when changes are pushed to the `main` branch.
```bash
docker run -d -p 3000:3000 ghcr.io/enricoros/big-agi:latest
```
## Deploy Steps
#### Run using *docker-compose* 🚀
1. Clone the big-AGI repository
2. Navigate to the project directory
3. Build the Docker image using the provided Dockerfile
4. Run the Docker container with the built image
If you have Docker Compose installed, you can run the Docker container with `docker-compose up`
to pull the Docker image (if it hasn't been pulled already) and start a Docker container. If you want to
update the image to the latest version, you can run `docker-compose pull` before starting the service.
```bash
docker-compose up -d
```
### Make Local Services Visible to Docker 🌐
To make local services running on your host machine accessible to a Docker container, such as a
[Browseless](./config-browse.md) service or a local API, you can follow this simplified guide:
| Operating System | Steps to Make Local Services Visible to Docker |
|:------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Windows and macOS | Use the special DNS name `host.docker.internal` to refer to the host machine from within the Docker container. No additional network configuration is required. Access local services using `host.docker.internal:<PORT>`. |
| Linux | Two options: *A*. Use <ins>--network="host"</ins> (`docker run --network="host" -d big-agi`) when running the Docker container to merge the container within the host network stack; however, this reduces container isolation. Alternatively: *B*. Connect to local services <ins>using the host's IP address</ins> directly, as host.docker.internal is not available by default on Linux. |
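For example (a sketch, assuming a Browserless service on the host's port 9222, as in [config-browse.md](./config-browse.md)), on Windows/macOS you could point the container at the host like this:

```bash
docker run -d -p 3000:3000 \
  -e PUPPETEER_WSS_ENDPOINT=ws://host.docker.internal:9222 \
  ghcr.io/enricoros/big-agi:latest
```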
<br/>
### More Information
The [`Dockerfile`](../Dockerfile) describes how to create a Docker image. It establishes a Node.js environment,
installs dependencies, and creates a production-ready version of the application as a local container.
The [`docker-compose.yaml`](../docker-compose.yaml) file is configured to run the
official image (big-agi:latest). This file is used to define the `big-agi` service, to expose
port 3000 on the host, and launch big-AGI within the container (startup command).
The [`.github/workflows/docker-image.yml`](../.github/workflows/docker-image.yml) file is used
to build the Official Docker images and publish them to the GitHub Container Registry (ghcr).
The build process is transparent and happens via GitHub Actions.
<br/>
Leverage Docker's capabilities for a reliable and efficient big-AGI deployment!
Embrace the benefits of Docker for a reliable and efficient big-AGI deployment. 🎉
@@ -1,31 +0,0 @@
# This file is used to run `big-AGI` and `browserless` with Docker Compose.
#
# The two containers are linked together and `big-AGI` is configured to use `browserless`
# as its Puppeteer endpoint (within the containers' network it is reachable at browserless:3000).
#
# From your host, you can access big-AGI on http://127.0.0.1:3000 and browserless on http://127.0.0.1:9222.
#
# To start the containers, run:
# docker-compose -f docs/docker/docker-compose-browserless.yaml up
version: '3.9'
services:
big-agi:
image: ghcr.io/enricoros/big-agi:latest
ports:
- "3000:3000"
env_file:
- .env
environment:
- PUPPETEER_WSS_ENDPOINT=ws://browserless:3000
command: [ "next", "start", "-p", "3000" ]
depends_on:
- browserless
browserless:
image: browserless/chrome:latest
ports:
- "9222:3000" # Map host's port 9222 to container's port 3000
environment:
- MAX_CONCURRENT_SESSIONS=10
-121
View File
@@ -1,121 +0,0 @@
# Environment Variables
This document provides an explanation of the environment variables used in the big-AGI application.
**All variables are optional**: _UI options_ take precedence over _backend environment variables_,
which take precedence over _defaults_. This file is kept in sync with [`../src/server/env.mjs`](../src/server/env.mjs).
### Setting Environment Variables
Environment variables can be set by creating a `.env` file in the root directory of the project.
The following is an example `.env` for copy-paste convenience:
```bash
# Database (Postgres)
POSTGRES_PRISMA_URL=
POSTGRES_URL_NON_POOLING=
# Database (MongoDB)
MDB_URI=
# LLMs
OPENAI_API_KEY=
OPENAI_API_HOST=
OPENAI_API_ORG_ID=
AZURE_OPENAI_API_ENDPOINT=
AZURE_OPENAI_API_KEY=
ANTHROPIC_API_KEY=
ANTHROPIC_API_HOST=
GEMINI_API_KEY=
MISTRAL_API_KEY=
OLLAMA_API_HOST=
OPENROUTER_API_KEY=
TOGETHERAI_API_KEY=
# Model Observability: Helicone
HELICONE_API_KEY=
# Text-To-Speech
ELEVENLABS_API_KEY=
ELEVENLABS_API_HOST=
ELEVENLABS_VOICE_ID=
# Text-To-Image
PRODIA_API_KEY=
# Google Custom Search
GOOGLE_CLOUD_API_KEY=
GOOGLE_CSE_ID=
# Browse
PUPPETEER_WSS_ENDPOINT=
# Backend Analytics
BACKEND_ANALYTICS=
# Backend HTTP Basic Authentication (see `deploy-authentication.md` for turning on authentication)
HTTP_BASIC_AUTH_USERNAME=
HTTP_BASIC_AUTH_PASSWORD=
```
## Variables Documentation
### Database
For Database configuration see [config-database.md](config-database.md).
To enable features such as Chat Link Sharing, you need to connect the backend to a database. We currently support Postgres and MongoDB.
### LLMs
The following variables, when set, enable the corresponding LLMs on the server side, without
requiring the user to enter an API key.
| Variable | Description | Required |
|-----------------------------|-------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------|
| `OPENAI_API_KEY` | API key for OpenAI | Recommended |
| `OPENAI_API_HOST` | Changes the backend host for the OpenAI vendor, to enable platforms such as Helicone and CloudFlare AI Gateway | Optional |
| `OPENAI_API_ORG_ID` | Sets the "OpenAI-Organization" header field to support organization users | Optional |
| `AZURE_OPENAI_API_ENDPOINT` | Azure OpenAI endpoint - host only, without the path | Optional, but if set `AZURE_OPENAI_API_KEY` must also be set |
| `AZURE_OPENAI_API_KEY` | Azure OpenAI API key, see [config-azure-openai.md](config-azure-openai.md) | Optional, but if set `AZURE_OPENAI_API_ENDPOINT` must also be set |
| `ANTHROPIC_API_KEY` | The API key for Anthropic | Optional |
| `ANTHROPIC_API_HOST` | Changes the backend host for the Anthropic vendor, to enable platforms such as [config-aws-bedrock.md](config-aws-bedrock.md) | Optional |
| `GEMINI_API_KEY` | The API key for Google AI's Gemini | Optional |
| `MISTRAL_API_KEY` | The API key for Mistral | Optional |
| `OLLAMA_API_HOST` | Changes the backend host for the Ollama vendor. See [config-ollama.md](config-ollama.md) | Optional |
| `OPENROUTER_API_KEY` | The API key for OpenRouter | Optional |
| `TOGETHERAI_API_KEY` | The API key for Together AI | Optional |
### Model Observability: Helicone
Helicone provides observability to your LLM calls. It is a paid service, with a generous free tier.
It is currently supported for:
- **Anthropic**: by setting the Helicone API key, Helicone is automatically activated
- **OpenAI**: you also need to set `OPENAI_API_HOST` to `oai.hconeai.com`, to enable routing
| Variable | Description |
|--------------------|--------------------------|
| `HELICONE_API_KEY` | The API key for Helicone |
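For example, to route OpenAI calls through Helicone (a sketch with placeholder values, per the note above):

```bash
HELICONE_API_KEY=your-helicone-api-key
OPENAI_API_HOST=oai.hconeai.com
```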
### Specials
Enable the app to Talk, Draw, and Google things up.
| Variable | Description |
|:---------------------------|:------------------------------------------------------------------------------------------------------------------------|
| **Text-To-Speech** | [ElevenLabs](https://elevenlabs.io/) is a high quality speech synthesis service |
| `ELEVENLABS_API_KEY` | ElevenLabs API Key - used for calls, etc. |
| `ELEVENLABS_API_HOST` | Custom host for ElevenLabs |
| `ELEVENLABS_VOICE_ID` | Default voice ID for ElevenLabs |
| **Google Custom Search** | [Google Programmable Search Engine](https://programmablesearchengine.google.com/about/) produces links to pages |
| `GOOGLE_CLOUD_API_KEY` | Google Cloud API Key, used with the '/react' command - [Link to GCP](https://console.cloud.google.com/apis/credentials) |
| `GOOGLE_CSE_ID` | Google Custom/Programmable Search Engine ID - [Link to PSE](https://programmablesearchengine.google.com/) |
| **Text-To-Image** | [Prodia](https://prodia.com/) is a reliable image generation service |
| `PRODIA_API_KEY` | Prodia API Key - used with '/imagine ...' |
| **Browse** | |
| `PUPPETEER_WSS_ENDPOINT` | Puppeteer WebSocket endpoint - used for browsing, etc. |
| **Backend** | |
| `BACKEND_ANALYTICS` | Semicolon-separated list of analytics flags (see backend.analytics.ts). Flags: `domain` logs the responding domain. |
| `HTTP_BASIC_AUTH_USERNAME` | See the [Authentication](deploy-authentication.md) guide. Username for HTTP Basic Authentication. |
| `HTTP_BASIC_AUTH_PASSWORD` | Password for HTTP Basic Authentication. |
---
Binary files not shown (several image assets were added, removed, or updated in this diff).
+16
View File
@@ -0,0 +1,16 @@
import { withAuth } from 'next-auth/middleware';
import { authType } from '@/modules/authentication/auth.server';
// noinspection JSUnusedGlobalSymbols
export const middleware = !authType ? () => null : withAuth({
callbacks: {
authorized({ req, token }) {
// console.log('authorized', req, token);
return !!token;
},
},
});
export const config = { matcher: ['/:path*'] };
-59
View File
@@ -1,59 +0,0 @@
/**
* Middleware to protect `big-AGI` with HTTP Basic Authentication
*
* For more information on how to deploy with HTTP Basic Authentication, see:
* - [deploy-authentication.md](docs/deploy-authentication.md)
*/
import type { NextRequest } from 'next/server';
import { NextResponse } from 'next/server';
// noinspection JSUnusedGlobalSymbols
export function middleware(request: NextRequest) {
// Validate deployment configuration
if (!process.env.HTTP_BASIC_AUTH_USERNAME || !process.env.HTTP_BASIC_AUTH_PASSWORD) {
console.warn('HTTP Basic Authentication is enabled but not configured');
return new Response('Unauthorized/Unconfigured', unauthResponse);
}
// Request client authentication if no credentials are provided
const authHeader = request.headers.get('authorization');
if (!authHeader?.startsWith('Basic '))
return new Response('Unauthorized', unauthResponse);
// Request authentication if credentials are invalid
const base64Credentials = authHeader.split(' ')[1];
const credentials = Buffer.from(base64Credentials, 'base64').toString('ascii');
const [username, password] = credentials.split(':');
if (
!username || !password ||
username !== process.env.HTTP_BASIC_AUTH_USERNAME ||
password !== process.env.HTTP_BASIC_AUTH_PASSWORD
)
return new Response('Unauthorized', unauthResponse);
return NextResponse.next();
}
// Response to send when authentication is required
const unauthResponse: ResponseInit = {
status: 401,
headers: {
'WWW-Authenticate': 'Basic realm="Secure big-AGI"',
},
};
export const config = {
matcher: [
// Include root
'/',
// Include pages
'/(call|index|news|personas|link)(.*)',
// Include API routes
'/api(.*)',
// Note: this excludes _next, /images etc..
],
};
+27
View File
@@ -0,0 +1,27 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
reactStrictMode: true,
env: {
// set to TRUE only when the corresponding API keys are provided at build time; these flags are used by the UI
HAS_SERVER_KEY_OPENAI: !!process.env.OPENAI_API_KEY,
HAS_SERVER_KEY_ELEVENLABS: !!process.env.ELEVENLABS_API_KEY,
HAS_SERVER_KEY_PRODIA: !!process.env.PRODIA_API_KEY,
HAS_SERVER_KEYS_GOOGLE_CSE: !!process.env.GOOGLE_CLOUD_API_KEY && !!process.env.GOOGLE_CSE_ID,
// for auth only
SERVER_AUTH_TYPE: process.env.AUTH_TYPE,
},
webpack(config, { isServer, dev }) {
// @mui/joy: anything material gets redirected to Joy
config.resolve.alias['@mui/material'] = '@mui/joy';
// @dqbd/tiktoken: enable asynchronous WebAssembly
config.experiments = {
asyncWebAssembly: true,
layers: true,
};
return config;
},
};
module.exports = nextConfig;
-46
View File
@@ -1,46 +0,0 @@
/** @type {import('next').NextConfig} */
let nextConfig = {
reactStrictMode: true,
// Note: disabled to check whether the project becomes slower with this
// modularizeImports: {
// '@mui/icons-material': {
// transform: '@mui/icons-material/{{member}}',
// },
// },
// [puppeteer] https://github.com/puppeteer/puppeteer/issues/11052
experimental: {
serverComponentsExternalPackages: ['puppeteer-core'],
},
webpack: (config, _options) => {
// @mui/joy: anything material gets redirected to Joy
config.resolve.alias['@mui/material'] = '@mui/joy';
// @dqbd/tiktoken: enable asynchronous WebAssembly
config.experiments = {
asyncWebAssembly: true,
layers: true,
};
return config;
},
// Uncomment the following leave console messages in production
// compiler: {
// removeConsole: false,
// },
};
// Validate environment variables, if set at build time. Will be actually read and used at runtime.
// This is the reason both this file and the server/env.mjs files have this extension.
await import('./src/server/env.mjs');
// conditionally enable the nextjs bundle analyzer
if (process.env.ANALYZE_BUNDLE) {
const { default: withBundleAnalyzer } = await import('@next/bundle-analyzer');
nextConfig = withBundleAnalyzer({ openAnalyzer: true })(nextConfig);
}
export default nextConfig;
+1680 -3249
View File
File diff suppressed because it is too large Load Diff
+30 -64
View File
@@ -1,80 +1,46 @@
{
"name": "big-agi",
"version": "1.13.0",
"version": "0.9.1",
"private": true,
"engines": {
"node": ">=18.0.0"
},
"scripts": {
"dev": "next dev",
"build": "next build",
"start": "next start",
"lint": "next lint",
"postinstall": "prisma generate",
"db:push": "prisma db push",
"db:studio": "prisma studio",
"vercel:env:pull": "npx vercel env pull .env.development.local"
"lint": "next lint"
},
"dependencies": {
"@emotion/cache": "^11.11.0",
"@emotion/react": "^11.11.3",
"@emotion/server": "^11.11.0",
"@emotion/styled": "^11.11.0",
"@mui/icons-material": "^5.15.8",
"@mui/joy": "5.0.0-beta.25",
"@next/bundle-analyzer": "^14.1.0",
"@prisma/client": "^5.9.1",
"@sanity/diff-match-patch": "^3.1.1",
"@t3-oss/env-nextjs": "^0.8.0",
"@tanstack/react-query": "~4.36.1",
"@trpc/client": "10.44.1",
"@trpc/next": "10.44.1",
"@trpc/react-query": "10.44.1",
"@trpc/server": "10.44.1",
"@vercel/analytics": "^1.1.3",
"@vercel/speed-insights": "^1.0.9",
"browser-fs-access": "^0.35.0",
"eventsource-parser": "^1.1.1",
"idb-keyval": "^6.2.1",
"next": "^14.1.0",
"nprogress": "^0.2.0",
"pdfjs-dist": "4.0.379",
"plantuml-encoder": "^1.4.0",
"@dqbd/tiktoken": "^1.0.7",
"@emotion/react": "^11.10.8",
"@emotion/server": "^11.10.0",
"@emotion/styled": "^11.10.8",
"@mui/icons-material": "^5.11.16",
"@mui/joy": "^5.0.0-alpha.77",
"@tanstack/react-query": "^4.29.5",
"@vercel/analytics": "^1.0.0",
"eventsource-parser": "^1.0.0",
"next": "^13.3.2",
"pdfjs-dist": "^3.5.141",
"next-auth": "^4.21.1",
"prismjs": "^1.29.0",
"react": "^18.2.0",
"react-beautiful-dnd": "^13.1.1",
"react-csv": "^2.2.2",
"react-dom": "^18.2.0",
"react-katex": "^3.0.1",
"react-markdown": "^9.0.1",
"react-player": "^2.14.1",
"react-resizable-panels": "^2.0.3",
"react-timeago": "^7.2.0",
"remark-gfm": "^4.0.0",
"superjson": "^2.2.1",
"tesseract.js": "^5.0.4",
"tiktoken": "^1.0.13",
"uuid": "^9.0.1",
"zod": "^3.22.4",
"zustand": "^4.5.0"
"react-markdown": "^8.0.7",
"remark-gfm": "^3.0.1",
"uuid": "^9.0.0",
"zustand": "^4.3.7"
},
"devDependencies": {
"@cloudflare/puppeteer": "^0.0.5",
"@types/node": "^20.11.16",
"@types/nprogress": "^0.2.3",
"@types/plantuml-encoder": "^1.4.2",
"@types/prismjs": "^1.26.3",
"@types/react": "^18.2.55",
"@types/react-beautiful-dnd": "^13.1.8",
"@types/react-csv": "^1.1.10",
"@types/react-dom": "^18.2.18",
"@types/react-katex": "^3.0.4",
"@types/react-timeago": "^4.1.7",
"@types/uuid": "^9.0.8",
"eslint": "^8.56.0",
"eslint-config-next": "^14.1.0",
"prettier": "^3.2.5",
"prisma": "^5.9.1",
"typescript": "^5.3.3"
},
"engines": {
"node": "^20.0.0 || ^18.0.0"
"@types/node": "^18.16.3",
"@types/prismjs": "^1.26.0",
"@types/react": "^18.2.0",
"@types/react-dom": "^18.2.1",
"@types/uuid": "^9.0.1",
"eslint": "^8.39.0",
"eslint-config-next": "^13.3.2",
"prettier": "^2.8.8",
"typescript": "^5.0.4"
}
}
+38 -44
View File
@@ -1,53 +1,47 @@
import * as React from 'react';
import Head from 'next/head';
import { MyAppProps } from 'next/app';
import { Analytics as VercelAnalytics } from '@vercel/analytics/react';
import { SpeedInsights as VercelSpeedInsights } from '@vercel/speed-insights/next';
import { AppProps } from 'next/app';
import { CacheProvider, EmotionCache } from '@emotion/react';
import { CssBaseline, CssVarsProvider } from '@mui/joy';
import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
import { Session as NextAuthSession } from 'next-auth';
import { SessionProvider } from 'next-auth/react';
import '@/common/styles/GithubMarkdown.css';
import { Brand } from '@/common/brand';
import { createEmotionCache, theme } from '@/common/theme';
import { Brand } from '~/common/app.config';
import { apiQuery } from '~/common/util/trpc.client';
// Client-side cache, shared for the whole session of the user in the browser.
const clientSideEmotionCache = createEmotionCache();
import 'katex/dist/katex.min.css';
import '~/common/styles/CodePrism.css';
import '~/common/styles/GithubMarkdown.css';
import '~/common/styles/NProgress.css';
import '~/common/styles/app.styles.css';
import { ProviderBackendAndNoSSR } from '~/common/providers/ProviderBackendAndNoSSR';
import { ProviderBootstrapLogic } from '~/common/providers/ProviderBootstrapLogic';
import { ProviderSingleTab } from '~/common/providers/ProviderSingleTab';
import { ProviderSnacks } from '~/common/providers/ProviderSnacks';
import { ProviderTRPCQueryClient } from '~/common/providers/ProviderTRPCQueryClient';
import { ProviderTheming } from '~/common/providers/ProviderTheming';
const MyApp = ({ Component, emotionCache, pageProps }: MyAppProps) =>
<>
<Head>
<title>{Brand.Title.Common}</title>
<meta name='viewport' content='minimum-scale=1, initial-scale=1, width=device-width, shrink-to-fit=no' />
</Head>
<ProviderTheming emotionCache={emotionCache}>
<ProviderSingleTab>
<ProviderBootstrapLogic>
<ProviderTRPCQueryClient>
<ProviderSnacks>
<ProviderBackendAndNoSSR>
<Component {...pageProps} />
</ProviderBackendAndNoSSR>
</ProviderSnacks>
</ProviderTRPCQueryClient>
</ProviderBootstrapLogic>
</ProviderSingleTab>
</ProviderTheming>
export interface MyAppProps extends AppProps {
emotionCache?: EmotionCache;
session?: NextAuthSession;
}
export default function MyApp({ Component, emotionCache = clientSideEmotionCache, pageProps: { session, ...pageProps } }: MyAppProps) {
const [queryClient] = React.useState(() => new QueryClient());
return <>
<CacheProvider value={emotionCache}>
<Head>
<title>{Brand.Title.Common}</title>
<meta name='viewport' content='minimum-scale=1, initial-scale=1, width=device-width, shrink-to-fit=no' />
</Head>
{/* Next-Auth provider */}
<SessionProvider session={session}>
{/* React Query provider */}
<QueryClientProvider client={queryClient}>
{/* JoyUI/Emotion */}
<CssVarsProvider defaultMode='light' theme={theme}>
{/* CssBaseline kickstart an elegant, consistent, and simple baseline to build upon. */}
<CssBaseline />
<Component {...pageProps} />
</CssVarsProvider>
</QueryClientProvider>
</SessionProvider>
</CacheProvider>
<VercelAnalytics debug={false} />
<VercelSpeedInsights debug={false} sampleRate={1 / 10} />
</>;
// enables the React Query API invocation
export default apiQuery.withTRPC(MyApp);
}
+13 -11
View File
@@ -1,36 +1,38 @@
import * as React from 'react';
import { AppType, MyAppProps } from 'next/app';
import { AppType } from 'next/app';
import { default as Document, DocumentContext, DocumentProps, Head, Html, Main, NextScript } from 'next/document';
import createEmotionServer from '@emotion/server/create-instance';
import { getInitColorSchemeScript } from '@mui/joy/styles';
import { Brand } from '~/common/app.config';
import { createEmotionCache } from '~/common/app.theme';
import { Brand } from '@/common/brand';
import { MyAppProps } from './_app';
import { bodyFontClassName, createEmotionCache } from '@/common/theme';
interface MyDocumentProps extends DocumentProps {
emotionStyleTags: React.JSX.Element[];
emotionStyleTags: JSX.Element[];
}
export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
return (
<Html lang='en'>
<Html lang='en' className={bodyFontClassName}>
<Head>
{/* Meta (missing Title, set by the App or Page) */}
<meta name='description' content={Brand.Meta.Description} />
<meta name='keywords' content={Brand.Meta.Keywords} />
<meta name='theme-color' content={Brand.Meta.ThemeColor} />
{/* Favicons & PWA */}
<link rel='shortcut icon' href='/favicon.ico' />
<link rel='icon' type='image/png' sizes='32x32' href='/icons/favicon-32x32.png' />
<link rel='icon' type='image/png' sizes='16x16' href='/icons/favicon-16x16.png' />
<link rel='apple-touch-icon' sizes='180x180' href='/apple-touch-icon.png' />
<link rel='apple-touch-icon' sizes='180x180' href='/icons/apple-touch-icon.png' />
<link rel='manifest' href='/manifest.json' />
<meta name='apple-mobile-web-app-capable' content='yes' />
<meta name='apple-mobile-web-app-status-bar-style' content='black' />
{/* Opengraph */}
<meta property='og:title' content={Brand.Title.Common} />
<meta property='og:title' content={Brand.Meta.Title} />
<meta property='og:description' content={Brand.Meta.Description} />
{Brand.URIs.CardImage && <meta property='og:image' content={Brand.URIs.CardImage} />}
<meta property='og:url' content={Brand.URIs.Home} />
@@ -40,7 +42,7 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
{/* Twitter */}
<meta property='twitter:card' content='summary_large_image' />
<meta property='twitter:url' content={Brand.URIs.Home} />
<meta property='twitter:title' content={Brand.Title.Common} />
<meta property='twitter:title' content={Brand.Meta.Title} />
<meta property='twitter:description' content={Brand.Meta.Description} />
{Brand.URIs.CardImage && <meta property='twitter:image' content={Brand.URIs.CardImage} />}
<meta name='twitter:site' content={Brand.Meta.TwitterSite} />
@@ -51,9 +53,9 @@ export default function MyDocument({ emotionStyleTags }: MyDocumentProps) {
{emotionStyleTags}
</Head>
<body>
{getInitColorSchemeScript()}
<Main />
<NextScript />
{getInitColorSchemeScript()}
<Main />
<NextScript />
</body>
</Html>
);
+20
View File
@@ -0,0 +1,20 @@
import { NextApiRequest, NextApiResponse } from 'next';
import { default as NextAuth } from 'next-auth';
import { authBasicUsers, authCreateProviders, authType } from '@/modules/authentication/auth.server';
const authOptions = {
secret: process.env.NEXTAUTH_SECRET,
providers: authCreateProviders(),
};
export default function handler(req: NextApiRequest, res: NextApiResponse) {
if (!authType)
return res.status(200).send('Auth not enabled');
if (Object.keys(authBasicUsers).length <= 0)
return res.status(200).send('Auth enabled but no users have been set up');
return NextAuth(req, res, authOptions);
}
+77
View File
@@ -0,0 +1,77 @@
import { NextRequest, NextResponse } from 'next/server';
import { ElevenLabs } from '@/modules/elevenlabs/elevenlabs.types';
function parseApiParameters(apiKey?: string) {
return {
apiHost: (process.env.ELEVENLABS_API_HOST || 'api.elevenlabs.io').trim().replaceAll('https://', ''),
apiHeaders: {
'Content-Type': 'application/json',
'xi-api-key': (apiKey || process.env.ELEVENLABS_API_KEY || '').trim(),
},
};
}
async function rethrowElevenLabsError(response: Response) {
if (!response.ok) {
let errorPayload: object | null = null;
try {
errorPayload = await response.json();
} catch (e) {
// ignore
}
console.error('Error in ElevenLabs API:', errorPayload);
throw new Error('ElevenLabs error: ' + JSON.stringify(errorPayload));
}
}
export async function getFromElevenLabs<TJson extends object>(apiKey: string, apiPath: string): Promise<TJson> {
const { apiHost, apiHeaders } = parseApiParameters(apiKey);
const response = await fetch(`https://${apiHost}${apiPath}`, {
method: 'GET',
headers: apiHeaders,
});
await rethrowElevenLabsError(response);
return await response.json();
}
export async function postToElevenLabs<TBody extends object>(apiKey: string, apiPath: string, body: TBody, signal?: AbortSignal): Promise<Response> {
const { apiHost, apiHeaders } = parseApiParameters(apiKey);
const response = await fetch(`https://${apiHost}${apiPath}`, {
method: 'POST',
headers: apiHeaders,
body: JSON.stringify(body),
signal,
});
await rethrowElevenLabsError(response);
return response;
}
export default async function handler(req: NextRequest) {
try {
const { apiKey = '', text, voiceId: userVoiceId, nonEnglish } = (await req.json()) as ElevenLabs.API.TextToSpeech.RequestBody;
const voiceId = userVoiceId || process.env.ELEVENLABS_VOICE_ID || '21m00Tcm4TlvDq8ikWAM';
const requestPayload: ElevenLabs.Wire.TextToSpeech.Request = {
text: text,
...(nonEnglish ? { model_id: 'eleven_multilingual_v1' } : {}),
};
const response = await postToElevenLabs<ElevenLabs.Wire.TextToSpeech.Request>(apiKey, `/v1/text-to-speech/${voiceId}`, requestPayload);
const audioBuffer: ElevenLabs.API.TextToSpeech.Response = await response.arrayBuffer();
return new NextResponse(audioBuffer, { status: 200, headers: { 'Content-Type': 'audio/mpeg' } });
} catch (error) {
console.error('Error posting to ElevenLabs', error);
return new NextResponse(JSON.stringify(`speechToText error: ${error?.toString() || 'Network issue'}`), { status: 500 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
+48
View File
@@ -0,0 +1,48 @@
import { NextRequest, NextResponse } from 'next/server';
import { ElevenLabs } from '@/modules/elevenlabs/elevenlabs.types';
import { getFromElevenLabs } from './speech';
export default async function handler(req: NextRequest) {
try {
const { apiKey = '' } = (await req.json()) as ElevenLabs.API.Voices.RequestBody;
const voicesList = await getFromElevenLabs<ElevenLabs.Wire.Voices.List>(apiKey, '/v1/voices');
// bring voices whose category !== 'premade' to the top
voicesList.voices.sort((a, b) => {
if (a.category === 'premade' && b.category !== 'premade') return 1;
if (a.category !== 'premade' && b.category === 'premade') return -1;
return 0;
});
// map to our own response format
const response: ElevenLabs.API.Voices.Response = {
voices: voicesList.voices.map((voice, idx) => ({
id: voice.voice_id,
name: voice.name,
description: voice.description,
previewUrl: voice.preview_url,
category: voice.category,
default: idx === 0,
})),
};
return new NextResponse(JSON.stringify(response), { status: 200, headers: { 'Content-Type': 'application/json' } });
} catch (error) {
console.error('Error fetching voices from ElevenLabs:', error);
return new NextResponse(
JSON.stringify({
type: 'error',
error: error?.toString() || error || 'Network issue',
}),
{ status: 500 },
);
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
+27
View File
@@ -0,0 +1,27 @@
import { NextRequest, NextResponse } from 'next/server';
import { OpenAI } from '@/modules/openai/openai.types';
import { openaiPost, toApiChatRequest, toWireCompletionRequest } from '@/modules/openai/openai.server';
export default async function handler(req: NextRequest) {
try {
const requestBodyJson = await req.json();
const { api, ...rest } = await toApiChatRequest(requestBodyJson);
const upstreamRequest: OpenAI.Wire.Chat.CompletionRequest = toWireCompletionRequest(rest, false);
const upstreamResponse: OpenAI.Wire.Chat.CompletionResponse = await openaiPost(api, '/v1/chat/completions', upstreamRequest);
return new NextResponse(JSON.stringify({
message: upstreamResponse.choices[0].message,
} satisfies OpenAI.API.Chat.Response));
} catch (error: any) {
console.error('Fetch request failed:', error);
return new NextResponse(`[Issue] ${error}`, { status: 400 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
+30
View File
@@ -0,0 +1,30 @@
import { NextRequest, NextResponse } from 'next/server';
import { OpenAI } from '@/modules/openai/openai.types';
import { openaiGet, toApiChatRequest } from '@/modules/openai/openai.server';
export default async function handler(req: NextRequest): Promise<NextResponse> {
try {
// FIXME: this is currently broken, the "extractOpenAIChatInputs" is expecting messages/modelId, which we don't have here
// keep working on this
const requestBodyJson = await req.json();
const { api } = await toApiChatRequest(requestBodyJson);
const wireModels = await openaiGet<OpenAI.Wire.Models.Response>(api, '/v1/models');
// flatten IDs (most recent first)
return new NextResponse(JSON.stringify({
models: wireModels.data.map((model) => ({ id: model.id, created: model.created })),
} satisfies OpenAI.API.Models.Response));
} catch (error: any) {
console.error('Fetch request failed:', error);
return new NextResponse(`[Issue] ${error}`, { status: 400 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
+117
View File
@@ -0,0 +1,117 @@
import { NextRequest, NextResponse } from 'next/server';
import { createParser } from 'eventsource-parser';
import { OpenAI } from '@/modules/openai/openai.types';
import { openaiPostResponse, toApiChatRequest, toWireCompletionRequest } from '@/modules/openai/openai.server';
async function chatStreamRepeater(input: OpenAI.API.Chat.Request, signal: AbortSignal): Promise<ReadableStream> {
// Handle the abort event when the connection is closed by the client
signal.addEventListener('abort', () => {
console.log('Client closed the connection.');
});
// begin event streaming from the OpenAI API
const encoder = new TextEncoder();
let upstreamResponse: Response;
try {
const request: OpenAI.Wire.Chat.CompletionRequest = toWireCompletionRequest(input, true);
upstreamResponse = await openaiPostResponse(input.api, '/v1/chat/completions', request, signal);
} catch (error: any) {
console.log(error);
const message = '[OpenAI Issue] ' + (error?.message || (typeof error === 'string' ? error : JSON.stringify(error))) + (error?.cause ? ' · ' + error.cause : '');
return new ReadableStream({
start: controller => {
controller.enqueue(encoder.encode(message));
controller.close();
},
});
}
// decoding and re-encoding loop
const onReadableStreamStart = async (controller: ReadableStreamDefaultController) => {
let hasBegun = false;
// the streamed response (SSE) from OpenAI arrives in multiple chunks; this parser
// turns each event into plain text and re-emits it to the client
const upstreamParser = createParser(event => {
// ignore reconnect interval
if (event.type !== 'event')
return;
// https://beta.openai.com/docs/api-reference/completions/create#completions/create-stream
if (event.data === '[DONE]') {
controller.close();
return;
}
try {
const json: OpenAI.Wire.Chat.CompletionResponseChunked = JSON.parse(event.data);
// ignore any 'role' delta update
if (json.choices[0].delta?.role)
return;
// stringify and send the first packet as a JSON object
if (!hasBegun) {
hasBegun = true;
const firstPacket: OpenAI.API.Chat.StreamingFirstResponse = {
model: json.model,
};
controller.enqueue(encoder.encode(JSON.stringify(firstPacket)));
}
// transmit the text stream
const text = json.choices[0].delta?.content || '';
controller.enqueue(encoder.encode(text));
} catch (error) {
// maybe parse error
console.error('Error parsing OpenAI response', error);
controller.error(error);
}
});
// https://web.dev/streams/#asynchronous-iteration
const decoder = new TextDecoder();
for await (const upstreamChunk of upstreamResponse.body as any)
upstreamParser.feed(decoder.decode(upstreamChunk, { stream: true }));
};
return new ReadableStream({
start: onReadableStreamStart,
cancel: (reason) => console.log('chatStreamRepeater cancelled', reason),
});
}
export default async function handler(req: NextRequest): Promise<Response> {
try {
const requestBodyJson = await req.json();
const chatRequest: OpenAI.API.Chat.Request = await toApiChatRequest(requestBodyJson);
const chatResponseStream: ReadableStream = await chatStreamRepeater(chatRequest, req.signal);
return new NextResponse(chatResponseStream);
} catch (error: any) {
if (error.name === 'AbortError') {
console.log('Fetch request aborted in handler');
return new Response('Request aborted by the user.', { status: 499 }); // Use 499 status code for client closed request
} else if (error.code === 'ECONNRESET') {
console.log('Connection reset by the client in handler');
return new Response('Connection reset by the client.', { status: 499 }); // Use 499 status code for client closed request
} else {
console.error('Fetch request failed:', error);
return new NextResponse(`[Issue] ${error}`, { status: 400 });
}
}
};
//noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
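For orientation, here is a hypothetical browser-side consumer for the stream produced by the handler above. The protocol implied by the code is one JSON "first packet" carrying the model name, followed by raw text deltas. The route path (/api/openai/stream-chat) and the brace-scan used to split off the first packet are assumptions for illustration, not part of this diff.

```ts
// Hypothetical client-side consumer of the streaming endpoint above.
// Assumes the route is served at /api/openai/stream-chat (path not shown in this diff).
async function consumeChatStream(requestBody: object, onText: (text: string) => void): Promise<string | undefined> {
  const response = await fetch('/api/openai/stream-chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(requestBody),
  });
  if (!response.ok || !response.body)
    throw new Error(`Stream request failed: ${response.status}`);

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let modelName: string | undefined = undefined;
  let buffer = '';

  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });

    // the first packet is a small JSON object ({ model }); everything after it is plain text
    if (modelName === undefined) {
      const closingBrace = buffer.indexOf('}');
      if (closingBrace === -1) continue; // first packet not complete yet
      modelName = (JSON.parse(buffer.slice(0, closingBrace + 1)) as { model: string }).model;
      buffer = buffer.slice(closingBrace + 1);
    }
    if (buffer) {
      onText(buffer);
      buffer = '';
    }
  }
  return modelName;
}
```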
+88
View File
@@ -0,0 +1,88 @@
// noinspection ExceptionCaughtLocallyJS
import { NextRequest, NextResponse } from 'next/server';
import { Prodia } from '@/modules/prodia/prodia.types';
export const prodiaHeaders = (apiKey: string): Record<string, string> => ({
'X-Prodia-Key': (apiKey || process.env.PRODIA_API_KEY || '').trim(),
});
async function createGenerationJob(apiKey: string, jobRequest: Prodia.Wire.Imagine.JobRequest): Promise<Prodia.Wire.Imagine.JobResponse> {
const response = await fetch('https://api.prodia.com/v1/job', {
method: 'POST',
headers: {
...prodiaHeaders(apiKey),
'Content-Type': 'application/json',
},
body: JSON.stringify(jobRequest),
});
if (response.status !== 200) {
console.log('Bad Prodia Response:', await response.text());
throw new Error(`Bad Prodia Response: ${response.status}`);
}
return await response.json();
}
async function getJobStatus(apiKey: string, jobId: string): Promise<Prodia.Wire.Imagine.JobResponse> {
const response = await fetch(`https://api.prodia.com/v1/job/${jobId}`, {
headers: prodiaHeaders(apiKey),
});
if (response.status !== 200)
throw new Error(`Bad Prodia Response: ${response.status}`);
return await response.json();
}
export default async function handler(req: NextRequest) {
// timeout, in seconds
const timeout = 15;
const tStart = Date.now();
try {
const { apiKey = '', prompt, prodiaModelId, negativePrompt, steps, cfgScale, seed } = (await req.json()) as Prodia.API.Imagine.RequestBody;
// create the job, getting back a job ID
const jobRequest: Prodia.Wire.Imagine.JobRequest = {
model: prodiaModelId,
prompt,
...(!!cfgScale && { cfg_scale: cfgScale }),
...(!!steps && { steps }),
...(!!negativePrompt && { negative_prompt: negativePrompt }),
...(!!seed && { seed }),
};
let job: Prodia.Wire.Imagine.JobResponse = await createGenerationJob(apiKey, jobRequest);
// poll the job status until it's done
let sleepDelay = 2000;
while (job.status !== 'succeeded' && job.status !== 'failed' && (Date.now() - tStart) < (timeout * 1000)) {
await new Promise(resolve => setTimeout(resolve, sleepDelay));
job = await getJobStatus(apiKey, job.job);
if (sleepDelay > 250)
sleepDelay /= 2;
}
// check for success
const elapsed = Math.round((Date.now() - tStart) / 100) / 10;
if (job.status !== 'succeeded' || !job.imageUrl)
throw new Error(`Prodia image generation failed within ${elapsed}s`);
// respond with the image URL
const altText = `Prodia generated "${jobRequest.prompt}". Options: ${JSON.stringify({ seed: job.params })}.`;
const response: Prodia.API.Imagine.Response = { status: 'success', imageUrl: job.imageUrl, altText, elapsed };
return new NextResponse(JSON.stringify(response));
} catch (error) {
console.error('Handler failed:', error);
const elapsed = Math.round((Date.now() - tStart) / 100) / 10;
const response: Prodia.API.Imagine.Response = { status: 'error', error: error?.toString() || 'Network issue', elapsed };
return new NextResponse(JSON.stringify(response), { status: 500 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
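As a usage sketch, here is a hypothetical client call matching the fields the handler above destructures from the request body. The route path (/api/prodia/imagine) is an assumption not shown in this diff; the model id is taken from the hardcoded list in the next file.

```ts
// Hypothetical client-side call to the image generation endpoint above.
// Field names mirror the handler's destructuring; the route path is assumed.
async function generateImage(prompt: string): Promise<{ status: string; imageUrl?: string; error?: string }> {
  const response = await fetch('/api/prodia/imagine', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      apiKey: '',                                             // optional: the server falls back to PRODIA_API_KEY
      prodiaModelId: 'deliberate_v2.safetensors [10ec4b29]',  // one of the hardcoded model ids
      prompt,
      negativePrompt: 'blurry, low quality',
      steps: 25,
      cfgScale: 7,
      seed: 42,
    }),
  });
  return response.json();
}
```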
+48
View File
@@ -0,0 +1,48 @@
import { NextRequest, NextResponse } from 'next/server';
import { Prodia } from '@/modules/prodia/prodia.types';
// hardcoded, for lack of a models-listing API
const HARDCODED_MODELS: Prodia.API.Models.Response = {
models: [
{ id: 'sdv1_4.ckpt [7460a6fa]', label: 'Stable Diffusion 1.4', priority: 8 },
{ id: 'v1-5-pruned-emaonly.ckpt [81761151]', label: 'Stable Diffusion 1.5', priority: 9 },
{ id: 'anythingv3_0-pruned.ckpt [2700c435]', label: 'Anything V3.0' },
{ id: 'anything-v4.5-pruned.ckpt [65745d25]', label: 'Anything V4.5' },
{ id: 'analog-diffusion-1.0.ckpt [9ca13f02]', label: 'Analog Diffusion' },
{ id: 'theallys-mix-ii-churned.safetensors [5d9225a4]', label: `TheAlly's Mix II` },
{ id: 'elldreths-vivid-mix.safetensors [342d9d26]', label: `Elldreth's Vivid Mix` },
{ id: 'deliberate_v2.safetensors [10ec4b29]', label: 'Deliberate V2', priority: 5 },
{ id: 'openjourney_V4.ckpt [ca2f377f]', label: 'Openjourney v4' },
{ id: 'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]', label: 'Dreamlike Diffusion' },
{ id: 'dreamlike-diffusion-2.0.safetensors [fdcf65e7]', label: 'Dreamlike Diffusion 2' },
{ id: 'portrait+1.0.safetensors [1400e684]', label: 'Portrait' },
{ id: 'riffusion-model-v1.ckpt [3aafa6fe]', label: 'Riffusion' },
{ id: 'timeless-1.0.ckpt [7c4971d4]', label: 'Timeless' },
{ id: 'dreamshaper_5BakedVae.safetensors [a3fbf318]', label: 'Dreamshaper 5' },
{ id: 'revAnimated_v122.safetensors [3f4fefd9]', label: 'ReV Animated V1.2.2' },
{ id: 'meinamix_meinaV9.safetensors [2ec66ab0]', label: 'MeinaMix Meina V9' },
],
};
// sort by priority
HARDCODED_MODELS.models.sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0));
export default async function handler(req: NextRequest): Promise<NextResponse> {
try {
// this is ignored for now, as there is no models-listing API yet - but we still want to be able to use it in the future
// noinspection JSUnusedLocalSymbols
const { apiKey = '' } = (await req.json()) as Prodia.API.Models.RequestBody;
return new NextResponse(JSON.stringify(HARDCODED_MODELS));
} catch (error: any) {
console.error('Handler failed:', error);
return new NextResponse(`[Issue] ${error}`, { status: 400 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
+50
View File
@@ -0,0 +1,50 @@
// noinspection ExceptionCaughtLocallyJS
import { NextRequest, NextResponse } from 'next/server';
import { PasteGG } from '@/modules/pastegg/pastegg.types';
import { pasteGgPost } from '@/modules/pastegg/pastegg.server';
/**
* 'Proxy' that uploads a file to paste.gg.
* Called by the UI to avoid CORS issues, as the browser cannot post directly to paste.gg.
*/
export default async function handler(req: NextRequest) {
try {
const { to, title, fileContent, fileName, origin }: PasteGG.API.Publish.RequestBody = await req.json();
if (req.method !== 'POST' || to !== 'paste.gg' || !title || !fileContent || !fileName)
throw new Error('Invalid options');
const paste = await pasteGgPost(title, fileName, fileContent, origin);
console.log(`Posted to paste.gg`, paste);
if (paste?.status !== 'success')
throw new Error(`${paste?.error || 'Unknown error'}: ${paste?.message || 'Paste.gg Error'}`);
return new NextResponse(JSON.stringify({
type: 'success',
url: `https://paste.gg/${paste.result.id}`,
expires: paste.result.expires || 'never',
deletionKey: paste.result.deletion_key || 'none',
created: paste.result.created_at,
} satisfies PasteGG.API.Publish.Response));
} catch (error) {
console.error('Error posting to paste.gg', error);
return new NextResponse(JSON.stringify({
type: 'error',
error: error?.toString() || 'Network issue',
} satisfies PasteGG.API.Publish.Response), { status: 500 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
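A hypothetical UI-side call to this proxy, for illustration; the route path (/api/publish) is an assumption, and the request fields mirror the handler's destructuring.

```ts
// Hypothetical browser-side helper that publishes a chat through the proxy above.
// The route path is an assumption, not shown in this diff.
async function publishToPasteGg(title: string, fileName: string, fileContent: string): Promise<{ url?: string; error?: string }> {
  const response = await fetch('/api/publish', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ to: 'paste.gg', title, fileName, fileContent, origin: window.location.origin }),
  });
  const result = await response.json();
  return result.type === 'success' ? { url: result.url } : { error: result.error };
}
```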
+47
View File
@@ -0,0 +1,47 @@
import { NextRequest, NextResponse } from 'next/server';
import { Search } from '@/modules/search/search.types';
import { objectToQueryString } from '@/modules/search/search.client';
export default async function handler(req: NextRequest): Promise<NextResponse> {
const { searchParams } = new URL(req.url);
const customSearchParams: Search.Wire.RequestParams = {
q: searchParams.get('query') || '',
cx: searchParams.get('cx') || process.env.GOOGLE_CSE_ID,
key: searchParams.get('key') || process.env.GOOGLE_CLOUD_API_KEY,
num: 5,
};
try {
if (!customSearchParams.key || !customSearchParams.cx) {
// noinspection ExceptionCaughtLocallyJS
throw new Error('Missing API Key or Custom Search Engine ID');
}
const wireResponse = await fetch(`https://www.googleapis.com/customsearch/v1?${objectToQueryString(customSearchParams)}`);
const data: Search.Wire.SearchResponse & { error?: { message?: string } } = await wireResponse.json();
if (data.error) {
// noinspection ExceptionCaughtLocallyJS
throw new Error(`Google Custom Search API error: ${data.error?.message}`);
}
const apiResponse: Search.API.Response = data.items?.map((result): Search.API.BriefResult => ({
title: result.title,
link: result.link,
snippet: result.snippet,
})) || [];
return new NextResponse(JSON.stringify(apiResponse));
} catch (error: any) {
console.error('Handler failed:', error);
return new NextResponse(`A search error occurred: ${error}`, { status: 500 });
}
}
// noinspection JSUnusedGlobalSymbols
export const config = {
runtime: 'edge',
};
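The handler relies on objectToQueryString from the client module, which is not part of this diff; a minimal sketch of such a helper, for reference only, could be:

```ts
// Sketch of a query-string helper equivalent to the imported objectToQueryString
// (the actual implementation lives in modules/search/search.client and is not shown here).
export function objectToQueryString(params: Record<string, string | number | undefined>): string {
  const search = new URLSearchParams();
  for (const [key, value] of Object.entries(params))
    if (value !== undefined && value !== '')
      search.set(key, String(value));
  return search.toString();
}
```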
-10
View File
@@ -1,10 +0,0 @@
import * as React from 'react';
import { AppCall } from '../src/apps/call/AppCall';
import { withLayout } from '~/common/layout/withLayout';
export default function CallPage() {
return withLayout({ type: 'optima' }, <AppCall />);
}
-10
View File
@@ -1,10 +0,0 @@
import * as React from 'react';
import { AppDraw } from '../src/apps/draw/AppDraw';
import { withLayout } from '~/common/layout/withLayout';
export default function DrawPage() {
return withLayout({ type: 'optima' }, <AppDraw />);
}
+45 -9
View File
@@ -1,17 +1,53 @@
import * as React from 'react';
import { AppChat } from '../src/apps/chat/AppChat';
import { useRedirectToNewsOnUpdates } from '../src/apps/news/news.hooks';
import { Container, useTheme } from '@mui/joy';
import { withLayout } from '~/common/layout/withLayout';
import { NoSSR } from '@/common/components/NoSSR';
import { isValidOpenAIApiKey } from '@/modules/openai/openai.client';
import { useSettingsStore } from '@/common/state/store-settings';
import { Chat } from '../src/apps/chat/Chat';
import { SettingsModal } from '../src/apps/settings/SettingsModal';
export default function IndexPage() {
// show the News page if there are unseen updates
useRedirectToNewsOnUpdates();
export default function Home() {
// state
const [settingsShown, setSettingsShown] = React.useState(false);
// TODO: This Index page will point to the Dashboard (or a landing page) soon
// For now it offers the chat experience, but this will change. #299
// external state
const theme = useTheme();
const apiKey = useSettingsStore(state => state.apiKey);
const centerMode = useSettingsStore(state => state.centerMode);
return withLayout({ type: 'optima' }, <AppChat />);
// show the Settings Dialog at startup if the API key is required but not set
React.useEffect(() => {
if (!process.env.HAS_SERVER_KEY_OPENAI && !isValidOpenAIApiKey(apiKey))
setSettingsShown(true);
}, [apiKey]);
return (
/**
* Note the global NoSSR wrapper
* - Even the overall container could have hydration issues when using localStorage and non-default maxWidth
*/
<NoSSR>
<Container maxWidth={centerMode === 'full' ? false : centerMode === 'narrow' ? 'md' : 'xl'} disableGutters sx={{
boxShadow: {
xs: 'none',
md: centerMode === 'narrow' ? theme.vars.shadow.md : 'none',
xl: centerMode !== 'full' ? theme.vars.shadow.lg : 'none',
},
}}>
<Chat onShowSettings={() => setSettingsShown(true)} />
<SettingsModal open={settingsShown} onClose={() => setSettingsShown(false)} />
</Container>
</NoSSR>
);
}
-90
View File
@@ -1,90 +0,0 @@
import * as React from 'react';
import { Box, Typography } from '@mui/joy';
import { useModelsStore } from '~/modules/llms/store-llms';
import { InlineError } from '~/common/components/InlineError';
import { apiQuery } from '~/common/util/trpc.client';
import { navigateToIndex, useRouterQuery } from '~/common/app.routes';
import { withLayout } from '~/common/layout/withLayout';
function CallbackOpenRouterPage(props: { openRouterCode: string | undefined }) {
// external state
const { data, isError, error, isLoading } = apiQuery.backend.exchangeOpenRouterKey.useQuery({ code: props.openRouterCode || '' }, {
enabled: !!props.openRouterCode,
refetchOnWindowFocus: false,
staleTime: Infinity,
});
// derived state
const isErrorInput = !props.openRouterCode;
const openRouterKey = data?.key ?? undefined;
const isSuccess = !!openRouterKey;
// Success: save the key and redirect to the chat app
React.useEffect(() => {
if (!isSuccess)
return;
// 1. Save the key as the client key
useModelsStore.getState().setOpenRoutersKey(openRouterKey);
// 2. Navigate to the chat app
void navigateToIndex(true); //.then(openModelsSetup);
}, [isSuccess, openRouterKey]);
return (
<Box sx={{
flexGrow: 1,
overflowY: 'auto',
display: 'flex', justifyContent: 'center',
p: { xs: 3, md: 6 },
}}>
<Box sx={{
// my: 'auto',
display: 'flex', flexDirection: 'column', alignItems: 'center',
gap: 4,
}}>
<Typography level='title-lg'>
Welcome Back
</Typography>
{isLoading && <Typography level='body-sm'>Loading...</Typography>}
{isErrorInput && <InlineError error='There was an issue retrieving the code from OpenRouter.' />}
{isError && <InlineError error={error} />}
{data && (
<Typography level='body-md'>
Success! You can now close this window.
</Typography>
)}
</Box>
</Box>
);
}
/**
* This page will be invoked by OpenRouter as a Callback
*
* Docs: https://openrouter.ai/docs#oauth
* Example URL: https://localhost:3000/link/callback_openrouter?code=SomeCode
*/
export default function CallbackPage() {
// external state - get the 'code=...' from the URL
const { code } = useRouterQuery<{ code: string | undefined }>();
return withLayout({ type: 'plain' }, <CallbackOpenRouterPage openRouterCode={code} />);
}
-15
View File
@@ -1,15 +0,0 @@
import * as React from 'react';
import { AppLinkChat } from '../../../src/apps/link/AppLinkChat';
import { useRouterQuery } from '~/common/app.routes';
import { withLayout } from '~/common/layout/withLayout';
export default function ChatLinkPage() {
// external state
const { chatLinkId } = useRouterQuery<{ chatLinkId: string | undefined }>();
return withLayout({ type: 'optima', suspendAutoModelsSetup: true }, <AppLinkChat chatLinkId={chatLinkId || null} />);
}
-137
View File
@@ -1,137 +0,0 @@
import * as React from 'react';
import { Alert, Box, Button, Typography } from '@mui/joy';
import ArrowBackIcon from '@mui/icons-material/ArrowBack';
import { setComposerStartupText } from '../../src/apps/chat/components/composer/store-composer';
import { callBrowseFetchPage } from '~/modules/browse/browse.client';
import { LogoProgress } from '~/common/components/LogoProgress';
import { asValidURL } from '~/common/util/urlUtils';
import { navigateToIndex, useRouterQuery } from '~/common/app.routes';
import { withLayout } from '~/common/layout/withLayout';
/**
* This page will be invoked on mobile when sharing Text/URLs/Files from other APPs
* See the /public/manifest.json for how this is configured. Parameters:
* - text: the text to share
* - url: the URL to share
* - if the URL is a valid URL, it will be downloaded and the content will be shared
* - if the URL is not a valid URL, it will be shared as text
* - title: the title of the shared content
*/
function AppShareTarget() {
// state
const [errorMessage, setErrorMessage] = React.useState<string | null>(null);
const [intentText, setIntentText] = React.useState<string | null>(null);
const [intentURL, setIntentURL] = React.useState<string | null>(null);
const [isDownloading, setIsDownloading] = React.useState(false);
// external state
const { url: queryUrl, text: queryText } = useRouterQuery<{
url: string | string[] | undefined,
text: string | string[] | undefined,
}>();
const queueComposerTextAndLaunchApp = React.useCallback((text: string) => {
setComposerStartupText(text);
void navigateToIndex(true);
}, []);
// Detect the share Intent from the query
React.useEffect(() => {
// skip when query is not parsed yet
let queryTextItem = queryUrl || queryText || null;
if (!queryTextItem)
return;
// single item from the query
if (Array.isArray(queryTextItem))
queryTextItem = queryTextItem[0];
// check if the item is a URL
const url = asValidURL(queryTextItem);
if (url)
setIntentURL(url);
else if (queryTextItem)
setIntentText(queryTextItem);
else
setErrorMessage('No text or url. Received: ' + JSON.stringify({ queryText, queryUrl }));
}, [queryText, queryUrl]);
// Text -> Composer
React.useEffect(() => {
if (intentText)
queueComposerTextAndLaunchApp(intentText);
}, [intentText, queueComposerTextAndLaunchApp]);
// URL -> download -> Composer
React.useEffect(() => {
if (intentURL) {
setIsDownloading(true);
callBrowseFetchPage(intentURL)
.then(page => {
if (page.stopReason !== 'error')
queueComposerTextAndLaunchApp('\n\n```' + intentURL + '\n' + page.content + '\n```\n');
else
setErrorMessage('Could not read any data' + (page.error ? ': ' + page.error : ''));
})
.catch(error => setErrorMessage(error?.message || error || 'Unknown error'))
.finally(() => setIsDownloading(false));
}
}, [intentURL, queueComposerTextAndLaunchApp]);
return (
<Box sx={{
display: 'flex', flexDirection: 'column', alignItems: 'center', justifyContent: 'center',
flexGrow: 1,
}}>
{/* Logo with Circular Progress */}
<LogoProgress showProgress={isDownloading} />
{/* Title */}
<Typography level='title-lg' sx={{ mt: 2, mb: 1 }}>
{isDownloading ? 'Loading...' : errorMessage ? '' : intentURL ? 'Done' : 'Receiving...'}
</Typography>
{/* Possible Error */}
{errorMessage && <>
<Alert variant='soft' color='danger' sx={{ my: 1 }}>
<Typography>{errorMessage}</Typography>
</Alert>
<Button
variant='solid' color='danger'
onClick={() => navigateToIndex()}
endDecorator={<ArrowBackIcon />}
sx={{ mt: 2 }}
>
Cancel
</Button>
</>}
{/* URL under analysis */}
<Typography level='body-xs'>
{intentURL}
</Typography>
</Box>
);
}
/**
* This page will be invoked on mobile when sharing Text/URLs/Files from other APPs
* Example URL: https://localhost:3000/link/share_target?title=This+Title&text=https%3A%2F%2Fexample.com%2Fapp%2Fpath
*/
export default function ShareTargetPage() {
return withLayout({ type: 'plain' }, <AppShareTarget />);
}
-14
View File
@@ -1,14 +0,0 @@
import * as React from 'react';
import { AppNews } from '../src/apps/news/AppNews';
import { useMarkNewsAsSeen } from '../src/apps/news/news.hooks';
import { withLayout } from '~/common/layout/withLayout';
export default function NewsPage() {
// 'touch' the last seen news version
useMarkNewsAsSeen();
return withLayout({ type: 'optima', suspendAutoModelsSetup: true }, <AppNews />);
}
-10
View File
@@ -1,10 +0,0 @@
import * as React from 'react';
import { AppPersonas } from '../src/apps/personas/AppPersonas';
import { withLayout } from '~/common/layout/withLayout';
export default function PersonasPage() {
return withLayout({ type: 'optima' }, <AppPersonas />);
}
-12
View File
@@ -1,12 +0,0 @@
import * as React from 'react';
import { Box } from '@mui/joy';
// import { AppWorkspace } from '../src/apps/personas/AppWorkspace';
import { withLayout } from '~/common/layout/withLayout';
export default function PersonasPage() {
return withLayout({ type: 'optima' }, <Box />);
}
-63
View File
@@ -1,63 +0,0 @@
// Prisma is the ORM for server-side (API) access to the database
//
// This file defines the schema for the database.
// - make sure to run 'prisma generate' after making changes to this file
// - make sure to run 'prisma db push' to sync the remote database with the schema
//
// Database is optional: when the environment variables are not set, the database is not used at all,
// and the storage of data in Big-AGI is limited to client-side (browser) storage.
//
// The database is used for:
// - the 'sharing' function, to let users share the chats with each other
generator client {
provider = "prisma-client-js"
}
datasource db {
provider = "postgresql"
url = env("POSTGRES_PRISMA_URL") // uses connection pooling
directUrl = env("POSTGRES_URL_NON_POOLING") // uses a direct connection
}
//
// Storage of Linked Data
//
model LinkStorage {
id String @id @default(uuid())
ownerId String
visibility LinkStorageVisibility
dataType LinkStorageDataType
dataTitle String?
dataSize Int
data Json
upVotes Int @default(0)
downVotes Int @default(0)
flagsCount Int @default(0)
readCount Int @default(0)
writeCount Int @default(1)
// time-based expiration
expiresAt DateTime?
// manual deletion
deletionKey String
isDeleted Boolean @default(false)
deletedAt DateTime?
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
}
enum LinkStorageVisibility {
PUBLIC
UNLISTED
PRIVATE
}
enum LinkStorageDataType {
CHAT_V1
}
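For context, a hypothetical server-side use of this schema through the generated Prisma client, sketching how a shared chat could be stored. Model and field names follow the schema above; everything else is illustrative.

```ts
// Illustrative only: storing a shared chat with the generated Prisma client.
import { randomUUID } from 'node:crypto';
import { Prisma, PrismaClient, LinkStorageDataType, LinkStorageVisibility } from '@prisma/client';

const prisma = new PrismaClient();

async function storeSharedChat(ownerId: string, chatJson: Prisma.InputJsonValue) {
  const serialized = JSON.stringify(chatJson);
  return prisma.linkStorage.create({
    data: {
      ownerId,
      visibility: LinkStorageVisibility.UNLISTED,
      dataType: LinkStorageDataType.CHAT_V1,
      dataSize: serialized.length,
      data: chatJson,                 // stored in the Json column
      deletionKey: randomUUID(),      // lets the creator delete the share later
    },
    select: { id: true, deletionKey: true, createdAt: true },
  });
}
```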

Binary image diffs not shown (sizes: 7.3 KiB before and after; 254 KiB, 270 KiB, and 11 KiB before only).
+4 -14
View File
@@ -1,8 +1,8 @@
{
"name": "big-AGI",
"short_name": "big-AGI",
"theme_color": "#32383E",
"background_color": "#9FA6AD",
"short_name": "AGI",
"theme_color": "#434356",
"background_color": "#B9B9C6",
"description": "Personal AGI App",
"display": "standalone",
"start_url": "/",
@@ -23,15 +23,5 @@
"sizes": "1024x1024",
"type": "image/png"
}
],
"share_target": {
"action": "/link/share_target",
"method": "GET",
"enctype": "application/x-www-form-urlencoded",
"params": {
"title": "title",
"text": "text",
"url": "url"
}
}
]
}
Binary file not shown.
Binary file not shown.

Some files were not shown because too many files have changed in this diff.