{
"activitybar.chat.title": "AutoDev",
"view.chat.title": "Chat",
"contributes.icons.autodev-icon.description": "autodev icon",
"contributes.icons.autodev-dark.description": "autodev-dark",
"contributes.icons.autodev-error.description": "AutoDev Error",
"contributes.icons.autodev-pair.description": "AutoDev Pair",
"configuration.general.title": "General",
"configuration.enableRenameSuggestion.description": "Enable rename suggestion",
"configuration.customPromptDir.description": "Custom prompt directory",
"configuration.chat.title": "Chat",
  "configuration.chat.enable.description": "Enable or disable AI chat",
"configuration.chat.provider.description": "Model Service Provider",
  "configuration.chat.model.description": "Model that overrides the provider's default chat model.",
"configuration.chat.models.description": "List of models available for the chat view",
"configuration.completions.title": "Code Completions",
"configuration.completions.enable.description": "Enable or disable inline completions",
"configuration.completions.provider.description": "Model Service Provider",
  "configuration.completions.model.description": "Model that overrides the provider's default completions model.",
"configuration.completions.template.description": "Prompt of model. Recommended use of \"fimSpecialTokens\"",
"configuration.completions.parameters.description": "Completion parameters of model.",
  "configuration.completions.fimSpecialTokens.description": "Fill-in-the-middle (FIM) is a special prompt format supported by the code completion model that can complete code between two already written code blocks.",
"configuration.completions.stops.description": "Stop words of model.",
"configuration.completions.requestDelay.markdownDescription": "Code auto-completion delay request time. Avoid excessive consumption of API tokens. `requestDelay` only works if `#autodev.completions.enable#` is enabled.",
"configuration.completions.enableLegacyMode.markdownDescription": "Use legacy `/v1/completions` instead of `/v1/chat/completions`, only openai provider.",
"configuration.storages.title": "Storages",
"configuration.storages.databaseType.description": "Database Type.",
"configuration.storages.databaseUrl.description": "Database URL. Defaults to `~/.autodev/index/autodev-index.sqlite`.",
"configuration.storages.vectorstoreType.description": "Vector Store Type.",
"configuration.storages.vectorstorePath.description": "Vector Store Database URL. Defaults to `~/.autodev/index/lancedb`.",
"configuration.embeddings.title": "Embeddings",
"configuration.embeddings.provider.description": "Model Service Provider",
"configuration.embeddings.provider.markdownDescription": "Override the model in the provider",
  "configuration.embeddings.model.description": "Model that overrides the provider's default embeddings model.",
"configuration.embeddings.batchSize.description": "The maximum number of documents to embed in a single request.",
  "configuration.providers.title": "Model Providers",
"configuration.anthropic.baseURL.markdownDescription": "Anthropic API URL, See [API Reference](https://docs.anthropic.com/en/api/getting-started)",
"configuration.anthropic.apiKey.description": "Anthropic API key",
"configuration.anthropic.model.description": "Model name to use",
  "configuration.openai.apiType.description": "The OpenAI service provider",
"configuration.openai.apiKey.markdownDescription": "Our legacy keys. Provides access to all organizations and all projects that user has been added to; access [API Keys](https://platform.openai.com/account/api-keys) to view your available keys. We highly advise transitioning to project keys for best security practices, although access via this method is currently still supported.",
"configuration.openai.baseURL.markdownDescription": "Override the default base URL for the API. See [API Reference](https://platform.openai.com/docs/api-reference/introduction)",
"configuration.openai.project.markdownDescription": "Provides access to a single project (preferred option); Access [Project API Keys](https://platform.openai.com/settings/organization/general) by selecting the specific project you wish to generate keys against.",
"configuration.openai.organization.description": "For users who belong to multiple organizations or are accessing their projects through their legacy user API key, you can pass a header to specify which organization and project is used for an API request. Usage from these API requests will count as usage for the specified organization and project.",
"configuration.openai.model.description": "Model name to use",
"configuration.openai.completionModel.description": "Model name to use. If not specified, the default model will be used.",
"configuration.openai.embeddingModel.description": "Model name to use",
"configuration.qianfan.apiKey.description": "Baidu Cloud API Key",
"configuration.qianfan.apiKey.markdownDescription": "Baidu Cloud API Key. See [Create an application](https://console.bce.baidu.com/qianfan/ais/console/applicationConsole/application).",
"configuration.qianfan.secretKey.description": "Baidu Cloud Secret Key.",
"configuration.qianfan.model.description": "Model name to use",
"configuration.tongyi.apiKey.description": "Ali Cloud API Key",
"configuration.tongyi.apiKey.markdownDescription": "Ali Cloud API Key. See [开通 DashScope 并创建 API-KEY](https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key).",
"configuration.tongyi.model.description": "Model name to use",
"configuration.tongyi.enableSearch.description": "Enable Search Enhancement",
"configuration.zhipuai.apiKey.description": "ZhipuAI API Key",
"configuration.zhipuai.apiKey.markdownDescription": "ZhipuAI API Key. See [智谱 AI 开放平台](https://open.bigmodel.cn/dev/api).",
"configuration.zhipuai.model.description": "Model name to use",
"configuration.zhipuai.embeddingModel.description": "Embedding Model name to use",
"configuration.ollama.baseURL.markdownDescription": "Ollama API URL, See [API Reference](https://github.com/ollama/ollama/blob/main/docs/api.md)",
"configuration.ollama.model.description": "Model name to use",
"configuration.ollama.completionModel.description": "Model name to use",
"configuration.ollama.embeddingModel.description": "Model name to use",
"configuration.transformers.remoteHost.description": "Host URL to load models from. Defaults to the Hugging Face Hub",
"configuration.transformers.remoteHost.markdownDescription": "Host URL to load models from. Defaults to the [Hugging Face Hub](https://huggingface.co)",
"configuration.transformers.remotePathTemplate.description": "Path template to fill in and append to remoteHost when loading models.",
"configuration.transformers.remotePathTemplate.markdownDescription": "Path template to fill in and append to `#autodev.transformers.remoteHost#` when loading models.",
"configuration.transformers.allowLocalModels.markdownDescription": "Whether to allow loading of local files, defaults to `true`. If set to `false`, it will skip the local file check and try to load the model from the remote host.",
"configuration.transformers.localModelPath.markdownDescription": "Path to load local models from. Defaults to `~/.autodev/models/`.",
"configuration.transformers.model.description": "Model name to use for embedding generation. Defaults to all-MiniLM-L6-v2",
"configuration.transformers.model.markdownDescription": "Model name to use for embedding generation. Defaults to all-MiniLM-L6-v2",
"configuration.transformers.onnxWasmNumThreads.description": "Number of threads to use for the WebAssembly backend. Defaults to cpus numbers.",
"configuration.transformers.onnxLoglevel.description": "Log level for the ONNX runtime. Defaults to error.",
"command.autoTest.title": "Generate Test",
"command.genApiData.title": "Generate API Data",
"command.openSettings.title": "Open Settings",
"command.showTutorial.title": "Show Tutorial",
"command.feedback.title": "Feedback",
"command.quickChat.title": "Quick Chat",
"command.AutoMethod.title": "Generate Method Codes",
"command.explainCode.title": "Explain Code",
"command.optimizeCode.title": "Optimize Code",
"command.quickFix.title": "Quick Fix",
"command.fixThis.title": "Fix This Code",
"command.autoComment.title": "Generate Documentation",
"command.customAction.title": "Custom Action",
"command.showChatPanel.title": "Start Chat",
"command.newChatSession.title": "New Session",
"command.showChatHistory.title": "History",
"command.showSystemAction.title": "Show Autodev Actions",
"command.showCodelensDetailQuickPick.title": "Show Quick Actions",
"command.codebase.createIndexes.title": "Codebase Indexing",
"command.codebase.retrievalCode.title": "Codebase Retrieval",
"configuration.codelensDisplayMode.item.expand": "Expand",
"configuration.codelensDisplayMode.item.collapse": "Collapse",
"configuration.codelensDisplayMode.description": "Controls the display of CodeLens",
"configuration.codelensDisplayItems.description": "Custom the display and sorting CodeLens Items",
  "autodev.Workspace.customFrameworkCodeFiles.title": "Custom project framework context code files",
  "autodev.Workspace.customFrameworkCodeFiles.description": "Custom project framework context code files"
}