Julian Bilcke
committed on
Commit
·
a2c0551
1
Parent(s):
0ed5b20
work on Groq support
Browse files- .env +7 -0
- README.md +22 -9
- package-lock.json +25 -0
- package.json +1 -0
- src/app/interface/page/index.tsx +15 -3
- src/app/interface/settings-dialog/defaultSettings.ts +3 -0
- src/app/interface/settings-dialog/getSettings.ts +3 -0
- src/app/interface/settings-dialog/localStorageKeys.ts +3 -0
- src/app/queries/predict.ts +5 -1
- src/app/queries/predictWithGroq.ts +28 -0
- src/lib/useOAuth.ts +16 -0
- src/types.ts +5 -0
.env
CHANGED
|
@@ -10,6 +10,7 @@ RENDERING_ENGINE="INFERENCE_API"
|
|
| 10 |
# - INFERENCE_ENDPOINT
|
| 11 |
# - INFERENCE_API
|
| 12 |
# - OPENAI
|
|
|
|
| 13 |
LLM_ENGINE="INFERENCE_API"
|
| 14 |
|
| 15 |
NEXT_PUBLIC_MAX_NB_PAGES="2"
|
|
@@ -43,6 +44,10 @@ AUTH_OPENAI_API_KEY=
|
|
| 43 |
# An experimental RENDERING engine (sorry it is not very documented yet, so you can use one of the other engines)
|
| 44 |
AUTH_VIDEOCHAIN_API_TOKEN=
|
| 45 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 46 |
# ------------- RENDERING API CONFIG --------------
|
| 47 |
|
| 48 |
# If you decided to use Replicate for the RENDERING engine
|
|
@@ -69,6 +74,8 @@ RENDERING_OPENAI_API_MODEL="dall-e-3"
|
|
| 69 |
|
| 70 |
# ------------- LLM API CONFIG ----------------
|
| 71 |
|
|
|
|
|
|
|
| 72 |
# If you decided to use OpenAI for the LLM engine
|
| 73 |
LLM_OPENAI_API_BASE_URL="https://api.openai.com/v1"
|
| 74 |
LLM_OPENAI_API_MODEL="gpt-4"
|
|
|
|
| 10 |
# - INFERENCE_ENDPOINT
|
| 11 |
# - INFERENCE_API
|
| 12 |
# - OPENAI
|
| 13 |
+
# - GROQ
|
| 14 |
LLM_ENGINE="INFERENCE_API"
|
| 15 |
|
| 16 |
NEXT_PUBLIC_MAX_NB_PAGES="2"
|
|
|
|
| 44 |
# An experimental RENDERING engine (sorry it is not very documented yet, so you can use one of the other engines)
|
| 45 |
AUTH_VIDEOCHAIN_API_TOKEN=
|
| 46 |
|
| 47 |
+
|
| 48 |
+
# Groq.com key: available for the LLM engine
|
| 49 |
+
AUTH_GROQ_API_KEY=
|
| 50 |
+
|
| 51 |
# ------------- RENDERING API CONFIG --------------
|
| 52 |
|
| 53 |
# If you decided to use Replicate for the RENDERING engine
|
|
|
|
| 74 |
|
| 75 |
# ------------- LLM API CONFIG ----------------
|
| 76 |
|
| 77 |
+
LLM_GROQ_API_MODEL="mixtral-8x7b-32768"
|
| 78 |
+
|
| 79 |
# If you decided to use OpenAI for the LLM engine
|
| 80 |
LLM_OPENAI_API_BASE_URL="https://api.openai.com/v1"
|
| 81 |
LLM_OPENAI_API_MODEL="gpt-4"
|
README.md
CHANGED
|
@@ -24,13 +24,14 @@ it requires various components to run for the frontend, backend, LLM, SDXL etc.
|
|
| 24 |
If you try to duplicate the project, open the `.env` you will see it requires some variables.
|
| 25 |
|
| 26 |
Provider config:
|
| 27 |
-
- `LLM_ENGINE`: can be one of: "INFERENCE_API", "INFERENCE_ENDPOINT", "OPENAI"
|
| 28 |
- `RENDERING_ENGINE`: can be one of: "INFERENCE_API", "INFERENCE_ENDPOINT", "REPLICATE", "VIDEOCHAIN", "OPENAI" for now, unless you code your custom solution
|
| 29 |
|
| 30 |
Auth config:
|
| 31 |
-
- `AUTH_HF_API_TOKEN`:
|
| 32 |
-
- `
|
| 33 |
-
- `
|
|
|
|
| 34 |
- `AUTH_REPLICATE_API_TOKEN`: in case you want to use Replicate.com
|
| 35 |
|
| 36 |
Rendering config:
|
|
@@ -42,9 +43,12 @@ Rendering config:
|
|
| 42 |
- `RENDERING_REPLICATE_API_MODEL`: optional, defaults to "stabilityai/sdxl"
|
| 43 |
- `RENDERING_REPLICATE_API_MODEL_VERSION`: optional, in case you want to change the version
|
| 44 |
|
| 45 |
-
Language model config:
|
| 46 |
- `LLM_HF_INFERENCE_ENDPOINT_URL`: "<use your own>"
|
| 47 |
-
- `LLM_HF_INFERENCE_API_MODEL`: "
|
|
|
|
|
|
|
|
|
|
| 48 |
|
| 49 |
In addition, there are some community sharing variables that you can just ignore.
|
| 50 |
Those variables are not required to run the AI Comic Factory on your own website or computer
|
|
@@ -108,14 +112,23 @@ To activate it, create a `.env.local` configuration file:
|
|
| 108 |
LLM_ENGINE="OPENAI"
|
| 109 |
|
| 110 |
# default openai api base url is: https://api.openai.com/v1
|
| 111 |
-
LLM_OPENAI_API_BASE_URL="
|
| 112 |
|
| 113 |
LLM_OPENAI_API_MODEL="gpt-3.5-turbo"
|
| 114 |
|
| 115 |
-
AUTH_OPENAI_API_KEY="
|
| 116 |
```
|
|
|
|
| 117 |
|
| 118 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 119 |
|
| 120 |
Another option could be to disable the LLM completely and replace it with another LLM protocol and/or provider (eg. Claude, Replicate), or a human-generated story instead (by returning mock or static data).
|
| 121 |
|
|
|
|
| 24 |
If you try to duplicate the project, open the `.env` you will see it requires some variables.
|
| 25 |
|
| 26 |
Provider config:
|
| 27 |
+
- `LLM_ENGINE`: can be one of: "INFERENCE_API", "INFERENCE_ENDPOINT", "OPENAI", or "GROQ"
|
| 28 |
- `RENDERING_ENGINE`: can be one of: "INFERENCE_API", "INFERENCE_ENDPOINT", "REPLICATE", "VIDEOCHAIN", "OPENAI" for now, unless you code your custom solution
|
| 29 |
|
| 30 |
Auth config:
|
| 31 |
+
- `AUTH_HF_API_TOKEN`: if you decide to use Hugging Face for the LLM engine (inference api model or a custom inference endpoint)
|
| 32 |
+
- `AUTH_OPENAI_API_KEY`: to use OpenAI for the LLM engine
|
| 33 |
+
- `AUTH_GROQ_API_KEY`: to use Groq for the LLM engine
|
| 34 |
+
- `AUTH_VIDEOCHAIN_API_TOKEN`: secret token to access the VideoChain API server
|
| 35 |
- `AUTH_REPLICATE_API_TOKEN`: in case you want to use Replicate.com
|
| 36 |
|
| 37 |
Rendering config:
|
|
|
|
| 43 |
- `RENDERING_REPLICATE_API_MODEL`: optional, defaults to "stabilityai/sdxl"
|
| 44 |
- `RENDERING_REPLICATE_API_MODEL_VERSION`: optional, in case you want to change the version
|
| 45 |
|
| 46 |
+
Language model config (depending on the LLM engine you decide to use):
|
| 47 |
- `LLM_HF_INFERENCE_ENDPOINT_URL`: "<use your own>"
|
| 48 |
+
- `LLM_HF_INFERENCE_API_MODEL`: "HuggingFaceH4/zephyr-7b-beta"
|
| 49 |
+
- `LLM_OPENAI_API_BASE_URL`: "https://api.openai.com/v1"
|
| 50 |
+
- `LLM_OPENAI_API_MODEL`: "gpt-4"
|
| 51 |
+
- `LLM_GROQ_API_MODEL`: "mixtral-8x7b-32768"
|
| 52 |
|
| 53 |
In addition, there are some community sharing variables that you can just ignore.
|
| 54 |
Those variables are not required to run the AI Comic Factory on your own website or computer
|
|
|
|
| 112 |
LLM_ENGINE="OPENAI"
|
| 113 |
|
| 114 |
# default openai api base url is: https://api.openai.com/v1
|
| 115 |
+
LLM_OPENAI_API_BASE_URL="A custom OpenAI API Base URL if you have some special privileges"
|
| 116 |
|
| 117 |
LLM_OPENAI_API_MODEL="gpt-3.5-turbo"
|
| 118 |
|
| 119 |
+
AUTH_OPENAI_API_KEY="Your own OpenAI API Key"
|
| 120 |
```
|
| 121 |
+
### Option 4: (new, experimental) use Groq
|
| 122 |
|
| 123 |
+
```bash
|
| 124 |
+
LLM_ENGINE="GROQ"
|
| 125 |
+
|
| 126 |
+
LLM_GROQ_API_MODEL="mixtral-8x7b-32768"
|
| 127 |
+
|
| 128 |
+
AUTH_GROQ_API_KEY="Your own GROQ API Key"
|
| 129 |
+
```
|
| 130 |
+
|
| 131 |
+
### Option 5: Fork and modify the code to use a different LLM system
|
| 132 |
|
| 133 |
Another option could be to disable the LLM completely and replace it with another LLM protocol and/or provider (eg. Claude, Replicate), or a human-generated story instead (by returning mock or static data).
|
| 134 |
|
package-lock.json
CHANGED
|
@@ -40,6 +40,7 @@
|
|
| 40 |
"encoding": "^0.1.13",
|
| 41 |
"eslint": "8.45.0",
|
| 42 |
"eslint-config-next": "13.4.10",
|
|
|
|
| 43 |
"html2canvas": "^1.4.1",
|
| 44 |
"konva": "^9.2.2",
|
| 45 |
"lucide-react": "^0.260.0",
|
|
@@ -4166,6 +4167,30 @@
|
|
| 4166 |
"resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz",
|
| 4167 |
"integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag=="
|
| 4168 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4169 |
"node_modules/has-bigints": {
|
| 4170 |
"version": "1.0.2",
|
| 4171 |
"resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz",
|
|
|
|
| 40 |
"encoding": "^0.1.13",
|
| 41 |
"eslint": "8.45.0",
|
| 42 |
"eslint-config-next": "13.4.10",
|
| 43 |
+
"groq-sdk": "^0.3.1",
|
| 44 |
"html2canvas": "^1.4.1",
|
| 45 |
"konva": "^9.2.2",
|
| 46 |
"lucide-react": "^0.260.0",
|
|
|
|
| 4167 |
"resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz",
|
| 4168 |
"integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag=="
|
| 4169 |
},
|
| 4170 |
+
"node_modules/groq-sdk": {
|
| 4171 |
+
"version": "0.3.1",
|
| 4172 |
+
"resolved": "https://registry.npmjs.org/groq-sdk/-/groq-sdk-0.3.1.tgz",
|
| 4173 |
+
"integrity": "sha512-A3/u52JDBR1BzAmMCc+XceDJdNGc0KipDrJOWeIFIYMy6vz4hWvfJBFLXgoS7MHNcLZ4jG89L48JhH/ONcaiMA==",
|
| 4174 |
+
"dependencies": {
|
| 4175 |
+
"@types/node": "^18.11.18",
|
| 4176 |
+
"@types/node-fetch": "^2.6.4",
|
| 4177 |
+
"abort-controller": "^3.0.0",
|
| 4178 |
+
"agentkeepalive": "^4.2.1",
|
| 4179 |
+
"digest-fetch": "^1.3.0",
|
| 4180 |
+
"form-data-encoder": "1.7.2",
|
| 4181 |
+
"formdata-node": "^4.3.2",
|
| 4182 |
+
"node-fetch": "^2.6.7",
|
| 4183 |
+
"web-streams-polyfill": "^3.2.1"
|
| 4184 |
+
}
|
| 4185 |
+
},
|
| 4186 |
+
"node_modules/groq-sdk/node_modules/@types/node": {
|
| 4187 |
+
"version": "18.19.21",
|
| 4188 |
+
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.21.tgz",
|
| 4189 |
+
"integrity": "sha512-2Q2NeB6BmiTFQi4DHBzncSoq/cJMLDdhPaAoJFnFCyD9a8VPZRf7a1GAwp1Edb7ROaZc5Jz/tnZyL6EsWMRaqw==",
|
| 4190 |
+
"dependencies": {
|
| 4191 |
+
"undici-types": "~5.26.4"
|
| 4192 |
+
}
|
| 4193 |
+
},
|
| 4194 |
"node_modules/has-bigints": {
|
| 4195 |
"version": "1.0.2",
|
| 4196 |
"resolved": "https://registry.npmjs.org/has-bigints/-/has-bigints-1.0.2.tgz",
|
package.json
CHANGED
|
@@ -41,6 +41,7 @@
|
|
| 41 |
"encoding": "^0.1.13",
|
| 42 |
"eslint": "8.45.0",
|
| 43 |
"eslint-config-next": "13.4.10",
|
|
|
|
| 44 |
"html2canvas": "^1.4.1",
|
| 45 |
"konva": "^9.2.2",
|
| 46 |
"lucide-react": "^0.260.0",
|
|
|
|
| 41 |
"encoding": "^0.1.13",
|
| 42 |
"eslint": "8.45.0",
|
| 43 |
"eslint-config-next": "13.4.10",
|
| 44 |
+
"groq-sdk": "^0.3.1",
|
| 45 |
"html2canvas": "^1.4.1",
|
| 46 |
"konva": "^9.2.2",
|
| 47 |
"lucide-react": "^0.260.0",
|
src/app/interface/page/index.tsx
CHANGED
|
@@ -1,5 +1,6 @@
|
|
| 1 |
import { allLayoutAspectRatios, allLayouts, allLayoutsNbPanels } from "@/app/layouts"
|
| 2 |
import { useStore } from "@/app/store"
|
|
|
|
| 3 |
import { cn } from "@/lib/utils"
|
| 4 |
import { useEffect, useRef } from "react"
|
| 5 |
|
|
@@ -14,6 +15,7 @@ export function Page({ page }: { page: number}) {
|
|
| 14 |
const aspectRatio = ((allLayoutAspectRatios as any)[layout] as string) || "aspect-[250/297]"
|
| 15 |
|
| 16 |
const nbPanels = ((allLayoutsNbPanels as any)[layout] as number) || 4
|
|
|
|
| 17 |
|
| 18 |
/*
|
| 19 |
const [canLoad, setCanLoad] = useState(false)
|
|
@@ -41,21 +43,31 @@ export function Page({ page }: { page: number}) {
|
|
| 41 |
ref={pageRef}
|
| 42 |
className={cn(
|
| 43 |
`w-full`,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
aspectRatio,
|
| 45 |
`transition-all duration-100 ease-in-out`,
|
| 46 |
`border border-stone-200`,
|
| 47 |
`shadow-2xl`,
|
| 48 |
`print:shadow-none`,
|
| 49 |
`print:border-0`,
|
| 50 |
-
`print:width-screen`,
|
| 51 |
-
`print:break-after-all`
|
| 52 |
)}
|
| 53 |
style={{
|
| 54 |
padding: `${Math.round((zoomLevel / 100) * 16)}px`
|
| 55 |
// marginLeft: `${zoomLevel > 100 ? `100`}`
|
| 56 |
}}
|
| 57 |
>
|
| 58 |
-
|
|
|
|
|
|
|
| 59 |
</div>
|
| 60 |
)
|
| 61 |
}
|
|
|
|
| 1 |
import { allLayoutAspectRatios, allLayouts, allLayoutsNbPanels } from "@/app/layouts"
|
| 2 |
import { useStore } from "@/app/store"
|
| 3 |
+
import { NB_PANELS_PER_PAGE } from "@/config"
|
| 4 |
import { cn } from "@/lib/utils"
|
| 5 |
import { useEffect, useRef } from "react"
|
| 6 |
|
|
|
|
| 15 |
const aspectRatio = ((allLayoutAspectRatios as any)[layout] as string) || "aspect-[250/297]"
|
| 16 |
|
| 17 |
const nbPanels = ((allLayoutsNbPanels as any)[layout] as number) || 4
|
| 18 |
+
const nbPages = Math.round(nbPanels / NB_PANELS_PER_PAGE)
|
| 19 |
|
| 20 |
/*
|
| 21 |
const [canLoad, setCanLoad] = useState(false)
|
|
|
|
| 43 |
ref={pageRef}
|
| 44 |
className={cn(
|
| 45 |
`w-full`,
|
| 46 |
+
`print:w-screen`,
|
| 47 |
+
`print:break-after-all`
|
| 48 |
+
)}
|
| 49 |
+
style={{
|
| 50 |
+
padding: `${Math.round((zoomLevel / 100) * 16)}px`
|
| 51 |
+
// marginLeft: `${zoomLevel > 100 ? `100`}`
|
| 52 |
+
}}
|
| 53 |
+
>
|
| 54 |
+
<div
|
| 55 |
+
className={cn(
|
| 56 |
aspectRatio,
|
| 57 |
`transition-all duration-100 ease-in-out`,
|
| 58 |
`border border-stone-200`,
|
| 59 |
`shadow-2xl`,
|
| 60 |
`print:shadow-none`,
|
| 61 |
`print:border-0`,
|
|
|
|
|
|
|
| 62 |
)}
|
| 63 |
style={{
|
| 64 |
padding: `${Math.round((zoomLevel / 100) * 16)}px`
|
| 65 |
// marginLeft: `${zoomLevel > 100 ? `100`}`
|
| 66 |
}}
|
| 67 |
>
|
| 68 |
+
<LayoutElement page={page} nbPanels={nbPanels} />
|
| 69 |
+
</div>
|
| 70 |
+
{nbPages > 1 && <p className="w-full text-center pt-4 font-sans text-2xs font-semibold text-stone-600">Page {page + 1} / {nbPages}</p>}
|
| 71 |
</div>
|
| 72 |
)
|
| 73 |
}
|
src/app/interface/settings-dialog/defaultSettings.ts
CHANGED
|
@@ -14,4 +14,7 @@ export const defaultSettings: Settings = {
|
|
| 14 |
replicateApiModelTrigger: "",
|
| 15 |
openaiApiKey: "",
|
| 16 |
openaiApiModel: "dall-e-3",
|
|
|
|
|
|
|
|
|
|
| 17 |
}
|
|
|
|
| 14 |
replicateApiModelTrigger: "",
|
| 15 |
openaiApiKey: "",
|
| 16 |
openaiApiModel: "dall-e-3",
|
| 17 |
+
openaiApiLanguageModel: "gpt-4",
|
| 18 |
+
groqApiKey: "",
|
| 19 |
+
groqApiLanguageModel: "mixtral-8x7b-32768",
|
| 20 |
}
|
src/app/interface/settings-dialog/getSettings.ts
CHANGED
|
@@ -21,6 +21,9 @@ export function getSettings(): Settings {
|
|
| 21 |
replicateApiModelTrigger: getValidString(localStorage?.getItem?.(localStorageKeys.replicateApiModelTrigger), defaultSettings.replicateApiModelTrigger),
|
| 22 |
openaiApiKey: getValidString(localStorage?.getItem?.(localStorageKeys.openaiApiKey), defaultSettings.openaiApiKey),
|
| 23 |
openaiApiModel: getValidString(localStorage?.getItem?.(localStorageKeys.openaiApiModel), defaultSettings.openaiApiModel),
|
|
|
|
|
|
|
|
|
|
| 24 |
}
|
| 25 |
} catch (err) {
|
| 26 |
return {
|
|
|
|
| 21 |
replicateApiModelTrigger: getValidString(localStorage?.getItem?.(localStorageKeys.replicateApiModelTrigger), defaultSettings.replicateApiModelTrigger),
|
| 22 |
openaiApiKey: getValidString(localStorage?.getItem?.(localStorageKeys.openaiApiKey), defaultSettings.openaiApiKey),
|
| 23 |
openaiApiModel: getValidString(localStorage?.getItem?.(localStorageKeys.openaiApiModel), defaultSettings.openaiApiModel),
|
| 24 |
+
openaiApiLanguageModel: getValidString(localStorage?.getItem?.(localStorageKeys.openaiApiLanguageModel), defaultSettings.openaiApiLanguageModel),
|
| 25 |
+
groqApiKey: getValidString(localStorage?.getItem?.(localStorageKeys.groqApiKey), defaultSettings.groqApiKey),
|
| 26 |
+
groqApiLanguageModel: getValidString(localStorage?.getItem?.(localStorageKeys.groqApiLanguageModel), defaultSettings.groqApiLanguageModel),
|
| 27 |
}
|
| 28 |
} catch (err) {
|
| 29 |
return {
|
src/app/interface/settings-dialog/localStorageKeys.ts
CHANGED
|
@@ -14,4 +14,7 @@ export const localStorageKeys: Record<keyof Settings, string> = {
|
|
| 14 |
replicateApiModelTrigger: "CONF_RENDERING_REPLICATE_API_MODEL_TRIGGER",
|
| 15 |
openaiApiKey: "CONF_AUTH_OPENAI_API_KEY",
|
| 16 |
openaiApiModel: "CONF_AUTH_OPENAI_API_MODEL",
|
|
|
|
|
|
|
|
|
|
| 17 |
}
|
|
|
|
| 14 |
replicateApiModelTrigger: "CONF_RENDERING_REPLICATE_API_MODEL_TRIGGER",
|
| 15 |
openaiApiKey: "CONF_AUTH_OPENAI_API_KEY",
|
| 16 |
openaiApiModel: "CONF_AUTH_OPENAI_API_MODEL",
|
| 17 |
+
openaiApiLanguageModel: "CONF_AUTH_OPENAI_API_LANGUAGE_MODEL",
|
| 18 |
+
groqApiKey: "CONF_AUTH_GROQ_API_KEY",
|
| 19 |
+
groqApiLanguageModel: "CONF_AUTH_GROQ_API_LANGUAGE_MODEL",
|
| 20 |
}
|
src/app/queries/predict.ts
CHANGED
|
@@ -3,7 +3,11 @@
|
|
| 3 |
import { LLMEngine } from "@/types"
|
| 4 |
import { predict as predictWithHuggingFace } from "./predictWithHuggingFace"
|
| 5 |
import { predict as predictWithOpenAI } from "./predictWithOpenAI"
|
|
|
|
| 6 |
|
| 7 |
const llmEngine = `${process.env.LLM_ENGINE || ""}` as LLMEngine
|
| 8 |
|
| 9 |
-
export const predict =
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
import { LLMEngine } from "@/types"
|
| 4 |
import { predict as predictWithHuggingFace } from "./predictWithHuggingFace"
|
| 5 |
import { predict as predictWithOpenAI } from "./predictWithOpenAI"
|
| 6 |
+
import { predict as predictWithGroq } from "./predictWithGroq"
|
| 7 |
|
| 8 |
const llmEngine = `${process.env.LLM_ENGINE || ""}` as LLMEngine
|
| 9 |
|
| 10 |
+
export const predict =
|
| 11 |
+
llmEngine === "GROQ" ? predictWithGroq :
|
| 12 |
+
llmEngine === "OPENAI" ? predictWithOpenAI :
|
| 13 |
+
predictWithHuggingFace
|
src/app/queries/predictWithGroq.ts
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"use server"
|
| 2 |
+
|
| 3 |
+
import Groq from "groq-sdk"
|
| 4 |
+
|
| 5 |
+
export async function predict(inputs: string, nbPanels: number): Promise<string> {
|
| 6 |
+
const groqApiKey = `${process.env.AUTH_GROQ_API_KEY || ""}`
|
| 7 |
+
const groqApiModel = `${process.env.LLM_GROQ_API_MODEL || "mixtral-8x7b-32768"}`
|
| 8 |
+
|
| 9 |
+
const groq = new Groq({
|
| 10 |
+
apiKey: groqApiKey,
|
| 11 |
+
})
|
| 12 |
+
|
| 13 |
+
const messages: Groq.Chat.Completions.CompletionCreateParams.Message[] = [
|
| 14 |
+
{ role: "assistant", content: "" },
|
| 15 |
+
]
|
| 16 |
+
|
| 17 |
+
try {
|
| 18 |
+
const res = await groq.chat.completions.create({
|
| 19 |
+
messages: messages,
|
| 20 |
+
model: groqApiModel,
|
| 21 |
+
})
|
| 22 |
+
|
| 23 |
+
return res.choices[0].message.content || ""
|
| 24 |
+
} catch (err) {
|
| 25 |
+
console.error(`error during generation: ${err}`)
|
| 26 |
+
return ""
|
| 27 |
+
}
|
| 28 |
+
}
|
src/lib/useOAuth.ts
CHANGED
|
@@ -56,6 +56,22 @@ export function useOAuth({
|
|
| 56 |
canLogin,
|
| 57 |
isLoggedIn,
|
| 58 |
})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 59 |
}
|
| 60 |
|
| 61 |
useEffect(() => {
|
|
|
|
| 56 |
canLogin,
|
| 57 |
isLoggedIn,
|
| 58 |
})
|
| 59 |
+
|
| 60 |
+
/*
|
| 61 |
+
useOAuth debug: {
|
| 62 |
+
oauthResult: '',
|
| 63 |
+
clientId: '........',
|
| 64 |
+
redirectUrl: 'http://localhost:3000',
|
| 65 |
+
scopes: 'openid profile inference-api',
|
| 66 |
+
isOAuthEnabled: true,
|
| 67 |
+
isBetaEnabled: false,
|
| 68 |
+
code: '...........',
|
| 69 |
+
state: '{"nonce":".........","redirectUri":"http://localhost:3000"}',
|
| 70 |
+
hasReceivedFreshOAuth: true,
|
| 71 |
+
canLogin: false,
|
| 72 |
+
isLoggedIn: false
|
| 73 |
+
}
|
| 74 |
+
*/
|
| 75 |
}
|
| 76 |
|
| 77 |
useEffect(() => {
|
src/types.ts
CHANGED
|
@@ -100,6 +100,7 @@ export type LLMEngine =
|
|
| 100 |
| "INFERENCE_ENDPOINT"
|
| 101 |
| "OPENAI"
|
| 102 |
| "REPLICATE"
|
|
|
|
| 103 |
|
| 104 |
export type RenderingEngine =
|
| 105 |
| "VIDEOCHAIN"
|
|
@@ -154,6 +155,7 @@ export type LayoutProps = {
|
|
| 154 |
nbPanels: number
|
| 155 |
}
|
| 156 |
|
|
|
|
| 157 |
export type Settings = {
|
| 158 |
renderingModelVendor: RenderingModelVendor
|
| 159 |
renderingUseTurbo: boolean
|
|
@@ -168,4 +170,7 @@ export type Settings = {
|
|
| 168 |
replicateApiModelTrigger: string
|
| 169 |
openaiApiKey: string
|
| 170 |
openaiApiModel: string
|
|
|
|
|
|
|
|
|
|
| 171 |
}
|
|
|
|
| 100 |
| "INFERENCE_ENDPOINT"
|
| 101 |
| "OPENAI"
|
| 102 |
| "REPLICATE"
|
| 103 |
+
| "GROQ"
|
| 104 |
|
| 105 |
export type RenderingEngine =
|
| 106 |
| "VIDEOCHAIN"
|
|
|
|
| 155 |
nbPanels: number
|
| 156 |
}
|
| 157 |
|
| 158 |
+
// TODO: rename the *Model fields to better indicate if this is an LLM or RENDER model
|
| 159 |
export type Settings = {
|
| 160 |
renderingModelVendor: RenderingModelVendor
|
| 161 |
renderingUseTurbo: boolean
|
|
|
|
| 170 |
replicateApiModelTrigger: string
|
| 171 |
openaiApiKey: string
|
| 172 |
openaiApiModel: string
|
| 173 |
+
openaiApiLanguageModel: string
|
| 174 |
+
groqApiKey: string
|
| 175 |
+
groqApiLanguageModel: string
|
| 176 |
}
|