Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions assets/icons/model-logo-gemini.svg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
1 change: 1 addition & 0 deletions assets/icons/model-logo-openai.svg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
22 changes: 22 additions & 0 deletions components/ModelSelector.vue
Original file line number Diff line number Diff line change
Expand Up @@ -176,6 +176,8 @@ const userConfig = await getUserConfig()
const ollamaBaseUrl = userConfig.llm.backends.ollama.baseUrl.toRef()
const lmStudioBaseUrl = userConfig.llm.backends.lmStudio.baseUrl.toRef()
const commonModel = userConfig.llm.model.toRef()
const geminiModel = userConfig.llm.backends.gemini.model.toRef()
const openaiModel = userConfig.llm.backends.openai.model.toRef()
const translationModel = userConfig.translation.model.toRef()
const endpointType = userConfig.llm.endpointType.toRef()
const translationEndpointType = userConfig.translation.endpointType.toRef()
Expand Down Expand Up @@ -205,6 +207,8 @@ const modelListUpdating = computed(() => {
const modelOptions = computed(() => {
const ollamaModels = modelList.value.filter((model) => model.backend === 'ollama')
const lmStudioModels = modelList.value.filter((model) => model.backend === 'lm-studio')
const geminiModels = modelList.value.filter((model) => model.backend === 'gemini')
const openaiModels = modelList.value.filter((model) => model.backend === 'openai')
const webllmModels = modelList.value.filter((model) => model.backend === 'web-llm')

const makeModelOptions = (model: typeof modelList.value[number]) => ({ type: 'option' as const, id: `${model.backend}#${model.model}`, label: model.name, model: { backend: model.backend, id: model.model } })
Expand All @@ -226,6 +230,18 @@ const modelOptions = computed(() => {
...lmStudioModels.map((model) => makeModelOptions(model)),
)
}
if (geminiModels.length) {
options.push(
makeHeader(`Gemini Models (${geminiModels.length})`),
...geminiModels.map((model) => makeModelOptions(model)),
)
}
if (openaiModels.length) {
options.push(
makeHeader(`OpenAI Models (${openaiModels.length})`),
...openaiModels.map((model) => makeModelOptions(model)),
)
}
return options
}
})
Expand All @@ -244,6 +260,12 @@ const selectedModel = computed({
if (props.modelType === 'chat') {
commonModel.value = modelInfo.model.id
endpointType.value = modelInfo.model.backend as LLMEndpointType
if (modelInfo.model.backend === 'gemini') {
geminiModel.value = modelInfo.model.id
}
else if (modelInfo.model.backend === 'openai') {
openaiModel.value = modelInfo.model.id
}
}
else {
translationModel.value = modelInfo.model.id
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -380,7 +380,13 @@ async function checkLLMBackendStatus() {
}
else if (status === 'backend-unavailable') {
toast(t('errors.model_request_error'), { duration: 2000 })
endpointType === 'ollama' ? showSettings({ scrollTarget: 'ollama-server-address-section' }) : showSettings({ scrollTarget: 'lm-studio-server-address-section' })
endpointType === 'ollama'
? showSettings({ scrollTarget: 'ollama-server-address-section' })
: endpointType === 'lm-studio'
? showSettings({ scrollTarget: 'lm-studio-server-address-section' })
: endpointType === 'gemini'
? showSettings({ scrollTarget: 'gemini-api-config-section' })
: showSettings({ scrollTarget: 'openai-api-config-section' })
emit('close')
return false
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -299,7 +299,13 @@ async function checkLLMBackendStatus() {
}
else if (status === 'backend-unavailable') {
toast(t('errors.model_request_error'), { duration: 2000 })
endpointType === 'ollama' ? showSettings({ scrollTarget: 'ollama-server-address-section' }) : showSettings({ scrollTarget: 'lm-studio-server-address-section' })
endpointType === 'ollama'
? showSettings({ scrollTarget: 'ollama-server-address-section' })
: endpointType === 'lm-studio'
? showSettings({ scrollTarget: 'lm-studio-server-address-section' })
: endpointType === 'gemini'
? showSettings({ scrollTarget: 'gemini-api-config-section' })
: showSettings({ scrollTarget: 'openai-api-config-section' })
emit('close')
return false
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -163,7 +163,13 @@ async function checkLLMBackendStatus() {
}
else if (status === 'backend-unavailable') {
toast(t('errors.model_request_error'), { duration: 2000 })
endpointType === 'ollama' ? showSettings({ scrollTarget: 'ollama-server-address-section' }) : showSettings({ scrollTarget: 'lm-studio-server-address-section' })
endpointType === 'ollama'
? showSettings({ scrollTarget: 'ollama-server-address-section' })
: endpointType === 'lm-studio'
? showSettings({ scrollTarget: 'lm-studio-server-address-section' })
: endpointType === 'gemini'
? showSettings({ scrollTarget: 'gemini-api-config-section' })
: showSettings({ scrollTarget: 'openai-api-config-section' })
emit('close')
return false
}
Expand Down
8 changes: 7 additions & 1 deletion entrypoints/content/composables/useTranslator.ts
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,13 @@ async function _useTranslator() {
const { status, endpointType } = await llmBackendStatusStore.checkCurrentBackendStatus()
if (status === 'backend-unavailable') {
toast('Failed to connect to Ollama server, please check your Ollama connection', { duration: 2000 })
endpointType === 'ollama' ? showSettings({ scrollTarget: `ollama-server-address-section` }) : showSettings({ scrollTarget: `lm-studio-server-address-section` })
endpointType === 'ollama'
? showSettings({ scrollTarget: 'ollama-server-address-section' })
: endpointType === 'lm-studio'
? showSettings({ scrollTarget: 'lm-studio-server-address-section' })
: endpointType === 'gemini'
? showSettings({ scrollTarget: 'gemini-api-config-section' })
: showSettings({ scrollTarget: 'openai-api-config-section' })
return
}
else if (status === 'no-model') {
Expand Down
2 changes: 1 addition & 1 deletion entrypoints/main-world-injected/llm-api.ts
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ export class LLMResponses {
async create(params: ResponseCreateParamsStreaming): Promise<StreamResponseObject>
async create(params: ResponseCreateParamsBase): Promise<StreamResponseObject | NonStreamResponseObject | NonStreamStructuredResponseObject> {
const readyStatus = await checkBackendModel(params.model)
if (!readyStatus.backend) throw new Error('ollama is not connected')
if (!readyStatus.backend) throw new Error('backend is not connected')
if (!readyStatus.model) throw new Error('model is not ready')
if (params.stream) {
return this.createStreamingResponse(params as ResponseCreateParamsStreaming)
Expand Down
7 changes: 5 additions & 2 deletions entrypoints/main-world-injected/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,13 @@ export async function getBrowserAIConfig() {
export async function checkBackendModel(model?: string) {
const status = await m2cRpc.checkBackendModelReady(model)
if (!status.backend || !status.model) {
const modelUnavailableMessage = model
? `Model [${model}] is not available in current provider settings.`
: 'Model is not available in current provider settings.'
await m2cRpc.emit('toast', {
message: !status.backend ? 'This page relies on the AI backend provided by Nativemind. Please ensure the backend is running.' : `Model [${model}] is not available. Please download the model from <a href="https://ollama.com/library/${model}" target="_blank">ollama.com</a>.`,
message: !status.backend ? 'This page relies on the AI backend provided by Nativemind. Please ensure the backend is running.' : modelUnavailableMessage,
type: 'error',
isHTML: true,
isHTML: false,
duration: 5000,
})
}
Expand Down
2 changes: 2 additions & 0 deletions entrypoints/settings/components/DebugSettings/index.vue
Original file line number Diff line number Diff line change
Expand Up @@ -630,6 +630,8 @@ const articles = ref<{ type: 'html' | 'pdf', url: string, title: string, content
const modelProviderOptions = [
{ id: 'ollama' as const, label: 'Ollama' },
{ id: 'lm-studio' as const, label: 'LM Studio' },
{ id: 'gemini' as const, label: 'Gemini API' },
{ id: 'openai' as const, label: 'OpenAI API' },
{ id: 'web-llm' as const, label: 'Web LLM' },
]

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,200 @@
<script setup lang="ts">
// Settings block for configuring Google Gemini as an LLM backend through its
// OpenAI-compatible API endpoint: API key, base URL, model selection (preset
// or custom id), and an optional custom context-window size.
import { useDebounceFn } from '@vueuse/core'
import { computed, onMounted, toRefs, watch } from 'vue'

import IconGeminiLogo from '@/assets/icons/model-logo-gemini.svg?component'
import Checkbox from '@/components/Checkbox.vue'
import Input from '@/components/Input.vue'
import ScrollTarget from '@/components/ScrollTarget.vue'
import Selector from '@/components/Selector.vue'
import Button from '@/components/ui/Button.vue'
import { SettingsScrollTarget } from '@/types/scroll-targets'
import { useLLMBackendStatusStore } from '@/utils/pinia-store/store'
import { getUserConfig } from '@/utils/user-config'

import Block from '../../Block.vue'
import SavedMessage from '../../SavedMessage.vue'
import Section from '../../Section.vue'

// Optional deep-link anchor; when it equals 'gemini-api-config-section' the
// API-key section scrolls itself into view (see <ScrollTarget> below).
defineProps<{
  scrollTarget?: SettingsScrollTarget
}>()

// Top-level await: this component must be rendered inside <Suspense>.
const userConfig = await getUserConfig()
const llmBackendStatusStore = useLLMBackendStatusStore()
const { geminiModelList } = toRefs(llmBackendStatusStore)
const { updateGeminiModelList } = llmBackendStatusStore
// Reactive refs bound to persisted user-config entries.
const endpointType = userConfig.llm.endpointType.toRef()
const model = userConfig.llm.backends.gemini.model.toRef()
const commonModel = userConfig.llm.model.toRef()
const baseUrl = userConfig.llm.backends.gemini.baseUrl.toRef()
const apiKey = userConfig.llm.backends.gemini.apiKey.toRef()
const commonApiKey = userConfig.llm.apiKey.toRef()
const numCtx = userConfig.llm.backends.gemini.numCtx.toRef()
const enableNumCtx = userConfig.llm.backends.gemini.enableNumCtx.toRef()
// Collapsible-block open/closed state, persisted in user config.
const open = userConfig.settings.blocks.geminiConfig.open.toRef()

// True when Gemini is the active LLM backend; drives the "In Use" button.
const isCurrentEndpoint = computed(() => endpointType.value === 'gemini')
// Set of preset model ids for O(1) membership checks against the stored model.
const presetModelIdSet = computed(() => new Set(geminiModelList.value.map((item) => item.id)))
// When the configured model id is not among the presets, surface it as a
// "(Custom)" entry so the selector can still display the current value.
const customModelOption = computed(() => {
  if (!model.value || presetModelIdSet.value.has(model.value)) return undefined
  return {
    id: model.value,
    label: `${model.value} (Custom)`,
    value: model.value,
  }
})
// Selector options: custom entry (if any) first, then the fetched presets.
const presetModelOptions = computed(() => {
  const presetOptions = geminiModelList.value.map((item) => ({
    id: item.id,
    label: item.name,
    value: item.id,
  }))
  if (customModelOption.value) {
    return [customModelOption.value, ...presetOptions]
  }
  return presetOptions
})
// Two-way binding for the preset selector; ignores empty selections so the
// stored model id is never cleared by the selector.
const selectedPresetModel = computed({
  get: () => model.value,
  set: (value?: string) => {
    if (value) model.value = value
  },
})
// Two-way binding for the free-text model-id input; trims whitespace on write.
const modelInput = computed({
  get: () => model.value ?? '',
  set: (value: string) => {
    model.value = value.trim()
  },
})

// Switch the active backend to Gemini. Propagates the Gemini API key to the
// common key slot, falls back to the first preset when the stored model id is
// not a known preset, and mirrors the result into the common model slot.
// NOTE(review): a custom (non-preset) model id gets replaced by the first
// preset here — confirm that is the intended behavior for custom ids.
const useGemini = () => {
  endpointType.value = 'gemini'
  commonApiKey.value = apiKey.value
  if (!presetModelIdSet.value.has(model.value)) {
    model.value = geminiModelList.value[0]?.id
  }
  commonModel.value = model.value
}

// Debounced so rapid typing in the key/URL inputs triggers one refresh.
const refreshPresetModels = useDebounceFn(() => {
  updateGeminiModelList()
}, 500)

// Re-fetch the preset list whenever credentials or the endpoint change.
// NOTE(review): commonApiKey is also watched; useGemini writes it, so using
// the button causes one extra (debounced) refresh — presumably harmless.
watch([baseUrl, apiKey, commonApiKey], () => {
  refreshPresetModels()
})

// Initial fetch so presets are available without user interaction.
onMounted(() => {
  updateGeminiModelList()
})
</script>

<template>
  <Block
    v-model:open="open"
    title="Gemini API"
    collapsible
  >
    <!-- Custom title with the Gemini logo instead of the plain-text title. -->
    <template #title>
      <div class="flex items-center gap-3">
        <div class="size-6 rounded-md flex items-center justify-center overflow-hidden shadow-02">
          <IconGeminiLogo class="size-5" />
        </div>
        <span class="font-medium text-base">
          Gemini API
        </span>
      </div>
    </template>
    <div class="flex flex-col gap-4">
      <!-- Activation row: button is disabled while Gemini is already in use. -->
      <Section>
        <div class="flex items-center justify-between gap-3">
          <div class="text-sm text-text-secondary">
            Configure Google Gemini using OpenAI-compatible API endpoint.
          </div>
          <Button
            size="sm"
            variant="secondary"
            :disabled="isCurrentEndpoint"
            @click="useGemini"
          >
            {{ isCurrentEndpoint ? 'In Use' : 'Use Gemini' }}
          </Button>
        </div>
      </Section>

      <!-- Scroll anchor so error flows can deep-link straight to the key input. -->
      <ScrollTarget
        :autoScrollIntoView="scrollTarget === 'gemini-api-config-section'"
        targetId="gemini-api-config-section"
      >
        <Section
          title="API Key"
          description="Generate a key from Google AI Studio, then paste it here."
        >
          <div class="flex flex-col gap-1">
            <Input
              v-model="apiKey"
              type="password"
              placeholder="AIza..."
              class="w-full"
            />
            <SavedMessage :watch="apiKey" />
          </div>
        </Section>
      </ScrollTarget>

      <Section
        title="Base URL"
        description="Default value uses Gemini OpenAI-compatible endpoint."
      >
        <div class="flex flex-col gap-1">
          <Input
            v-model="baseUrl"
            placeholder="https://generativelanguage.googleapis.com/v1beta/openai"
            class="w-full"
          />
          <SavedMessage :watch="baseUrl" />
        </div>
      </Section>

      <!-- Model can be picked from fetched presets or typed as a custom id. -->
      <Section
        title="Model ID"
        description="You can use preset Gemini models or enter a custom model ID."
      >
        <div class="flex flex-col gap-2">
          <div class="w-64">
            <Selector
              v-model="selectedPresetModel"
              :options="presetModelOptions"
              placeholder="Select Gemini model"
            />
          </div>
          <Input
            v-model="modelInput"
            placeholder="gemini-flash-latest"
            class="w-full"
          />
          <SavedMessage :watch="modelInput" />
        </div>
      </Section>

      <!-- Context-window override; numeric input only shown when enabled. -->
      <Section title="Context Window">
        <div class="flex flex-col gap-2">
          <Checkbox
            v-model="enableNumCtx"
            name="gemini-enable-num-ctx"
            text="Enable custom context window"
          />
          <Input
            v-if="enableNumCtx"
            v-model.number="numCtx"
            type="number"
            placeholder="8192"
            class="w-full"
          />
          <!-- Watch numCtx only while enabled, otherwise the toggle itself. -->
          <SavedMessage :watch="enableNumCtx ? numCtx : enableNumCtx" />
        </div>
      </Section>
    </div>
  </Block>
</template>
Loading