Skip to content

Commit a23ccfa

Browse files
committed
feat(context)!: implement schema-based input for contexts
Redesign context provider input handling with JSON schema validation and improved UX. Replace callback-based input with declarative schemas that define available properties and constraints. This enables a more consistent and maintainable approach for input handling across all context providers.

Key improvements:
- Add schema property to context config for input validation
- Create functions to parse and describe schemas
- Implement unified input separator for multi-value parameters
- Update all built-in context providers to use schema-based input
- Improve prompt completion and documentation generation

BREAKING CHANGE: context.resolve now accepts table instead of string and context.input is built automatically, overridden through schema enum
1 parent 62b1249 commit a23ccfa

File tree

6 files changed

+500
-220
lines changed

6 files changed

+500
-220
lines changed

lua/CopilotChat/client.lua

+2-38
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
---@class CopilotChat.Client.ask
22
---@field load_history boolean
33
---@field headless boolean
4-
---@field contexts table<string, string>?
54
---@field selection CopilotChat.select.selection?
65
---@field embeddings table<CopilotChat.context.embed>?
76
---@field system_prompt string
@@ -201,49 +200,14 @@ end
201200
--- Generate ask request
202201
--- @param history table<CopilotChat.Provider.input>
203202
--- @param memory CopilotChat.Client.memory?
204-
--- @param contexts table<string, string>?
205203
--- @param prompt string
206204
--- @param system_prompt string
207205
--- @param generated_messages table<CopilotChat.Provider.input>
208-
local function generate_ask_request(history, memory, contexts, prompt, system_prompt, generated_messages)
206+
local function generate_ask_request(history, memory, prompt, system_prompt, generated_messages)
209207
local messages = {}
210208

211209
system_prompt = vim.trim(system_prompt)
212210

213-
-- Include context help
214-
if contexts and not vim.tbl_isempty(contexts) then
215-
local help_text = [[When you need additional context, request it using this format:
216-
217-
> #<command>:`<input>`
218-
219-
Examples:
220-
> #file:`path/to/file.js` (loads specific file)
221-
> #buffers:`visible` (loads all visible buffers)
222-
> #git:`staged` (loads git staged changes)
223-
> #system:`uname -a` (loads system information)
224-
225-
Guidelines:
226-
- Always request context when needed rather than guessing about files or code
227-
- Use the > format on a new line when requesting context
228-
- Output context commands directly - never ask if the user wants to provide information
229-
- Assume the user will provide requested context in their next response
230-
231-
Available context providers and their usage:]]
232-
233-
local context_names = vim.tbl_keys(contexts)
234-
table.sort(context_names)
235-
for _, name in ipairs(context_names) do
236-
local description = contexts[name]
237-
description = description:gsub('\n', '\n ')
238-
help_text = help_text .. '\n\n - #' .. name .. ': ' .. description
239-
end
240-
241-
if system_prompt ~= '' then
242-
system_prompt = system_prompt .. '\n\n'
243-
end
244-
system_prompt = system_prompt .. help_text
245-
end
246-
247211
-- Include memory
248212
if memory and memory.content and memory.content ~= '' then
249213
if system_prompt ~= '' then
@@ -694,7 +658,7 @@ function Client:ask(prompt, opts)
694658

695659
local headers = self:authenticate(provider_name)
696660
local request = provider.prepare_input(
697-
generate_ask_request(history, self.memory, opts.contexts, prompt, opts.system_prompt, generated_messages),
661+
generate_ask_request(history, self.memory, prompt, opts.system_prompt, generated_messages),
698662
options
699663
)
700664
local is_stream = request.stream

0 commit comments

Comments
 (0)