From d6ae513545baf3d8178d219603e758f43c45852d Mon Sep 17 00:00:00 2001 From: Valery Bugakov Date: Mon, 13 Jan 2025 10:10:11 +0900 Subject: [PATCH 1/3] feat(audoedit): improve error logging --- .../src/autoedits/autoedits-provider.test.ts | 39 ++ vscode/src/autoedits/autoedits-provider.ts | 454 +++++++++--------- vscode/src/autoedits/test-helpers.ts | 15 +- vscode/src/services/sentry/sentry.ts | 10 +- 4 files changed, 293 insertions(+), 225 deletions(-) diff --git a/vscode/src/autoedits/autoedits-provider.test.ts b/vscode/src/autoedits/autoedits-provider.test.ts index 59003cdf0477..b5dbd8ccb7ea 100644 --- a/vscode/src/autoedits/autoedits-provider.test.ts +++ b/vscode/src/autoedits/autoedits-provider.test.ts @@ -1,3 +1,4 @@ +import * as sentryCore from '@sentry/core' import * as uuid from 'uuid' import { type MockInstance, @@ -307,6 +308,44 @@ describe('AutoeditsProvider', () => { expect(suggestedEventPayload.metadata.isRead).toBe(1) }) + it('errors are reported via telemetry recorded and Sentry', async () => { + const captureExceptionSpy = vi.spyOn(sentryCore, 'captureException') + const testError = new Error('test-error') + + const { result } = await autoeditResultFor('const x = █', { + prediction: 'const x = 1\n', + getModelResponse() { + throw testError + }, + }) + + expect(result).toBe(null) + + // Error is captured by the telemetry recorded + expect(recordSpy).toHaveBeenCalledTimes(1) + expect(recordSpy).toHaveBeenNthCalledWith(1, 'cody.autoedit', 'error', expect.any(Object)) + + const errorPayload = recordSpy.mock.calls[0].at(2) + expect(errorPayload).toMatchInlineSnapshot(` + { + "metadata": { + "count": 1, + }, + "privateMetadata": { + "message": "test-error", + "traceId": undefined, + }, + "version": 0, + } + `) + + // Error is captured by the Sentry service + expect(captureExceptionSpy).toHaveBeenCalledTimes(1) + + const captureExceptionPayload = captureExceptionSpy.mock.calls[0].at(0) + expect(captureExceptionPayload).toEqual(testError) + }) + it('rejects the current suggestion when the new one is shown', async () => { const prediction = 'const x = 1\n' const { provider } = await autoeditResultFor('const a = █\n', { prediction }) diff --git a/vscode/src/autoedits/autoedits-provider.ts b/vscode/src/autoedits/autoedits-provider.ts index c79194e4d57d..02589675bfcd 100644 --- a/vscode/src/autoedits/autoedits-provider.ts +++ b/vscode/src/autoedits/autoedits-provider.ts @@ -158,269 +158,285 @@ export class AutoeditsProvider implements vscode.InlineCompletionItemProvider, v inlineCompletionContext: vscode.InlineCompletionContext, token?: vscode.CancellationToken ): Promise { - const start = getTimeNowInMillis() - const controller = new AbortController() - const abortSignal = controller.signal - token?.onCancellationRequested(() => controller.abort()) - - await new Promise(resolve => setTimeout(resolve, INLINE_COMPLETION_DEFAULT_DEBOUNCE_INTERVAL_MS)) - if (abortSignal.aborted) { - autoeditsOutputChannelLogger.logDebugIfVerbose( - 'provideInlineCompletionItems', - 'debounce aborted before calculating getCurrentDocContext' + try { + const start = getTimeNowInMillis() + const controller = new AbortController() + const abortSignal = controller.signal + token?.onCancellationRequested(() => controller.abort()) + + await new Promise(resolve => + setTimeout(resolve, INLINE_COMPLETION_DEFAULT_DEBOUNCE_INTERVAL_MS) ) - return null - } - - autoeditsOutputChannelLogger.logDebugIfVerbose( - 'provideInlineCompletionItems', - 'Calculating getCurrentDocContext...' 
- ) - const docContext = getCurrentDocContext({ - document, - position, - maxPrefixLength: tokensToChars(autoeditsProviderConfig.tokenLimit.prefixTokens), - maxSuffixLength: tokensToChars(autoeditsProviderConfig.tokenLimit.suffixTokens), - }) - - const { - fileWithMarkerPrompt, - areaPrompt, - codeToReplaceData, - codeToReplaceData: { codeToRewrite }, - } = getCurrentFilePromptComponents({ - docContext, - document, - position, - tokenBudget: autoeditsProviderConfig.tokenLimit, - }) - - autoeditsOutputChannelLogger.logDebugIfVerbose( - 'provideInlineCompletionItems', - 'Calculating context from contextMixer...' - ) - const requestId = autoeditAnalyticsLogger.createRequest({ - startedAt: performance.now(), - codeToReplaceData, - position, - docContext, - document, - payload: { - languageId: document.languageId, - model: autoeditsProviderConfig.model, - codeToRewrite, - triggerKind: autoeditTriggerKind.automatic, - }, - }) + if (abortSignal.aborted) { + autoeditsOutputChannelLogger.logDebugIfVerbose( + 'provideInlineCompletionItems', + 'debounce aborted before calculating getCurrentDocContext' + ) + return null + } - const { context, contextSummary } = await this.contextMixer.getContext({ - document, - position, - docContext, - maxChars: 32_000, - }) - autoeditAnalyticsLogger.markAsContextLoaded({ - requestId, - payload: { - contextSummary, - }, - }) - if (abortSignal.aborted) { autoeditsOutputChannelLogger.logDebugIfVerbose( 'provideInlineCompletionItems', - 'aborted in getContext' + 'Calculating getCurrentDocContext...' ) - return null - } - - autoeditsOutputChannelLogger.logDebugIfVerbose( - 'provideInlineCompletionItems', - 'Calculating prompt from promptStrategy...' - ) - const prompt = this.promptStrategy.getPromptForModelType({ - fileWithMarkerPrompt, - areaPrompt, - context, - tokenBudget: autoeditsProviderConfig.tokenLimit, - isChatModel: autoeditsProviderConfig.isChatModel, - }) + const docContext = getCurrentDocContext({ + document, + position, + maxPrefixLength: tokensToChars(autoeditsProviderConfig.tokenLimit.prefixTokens), + maxSuffixLength: tokensToChars(autoeditsProviderConfig.tokenLimit.suffixTokens), + }) - autoeditsOutputChannelLogger.logDebugIfVerbose( - 'provideInlineCompletionItems', - 'Calculating prediction from getPrediction...' - ) - const initialPrediction = await this.getPrediction({ - document, - position, - prompt, - codeToReplaceData, - }) + const { + fileWithMarkerPrompt, + areaPrompt, + codeToReplaceData, + codeToReplaceData: { codeToRewrite }, + } = getCurrentFilePromptComponents({ + docContext, + document, + position, + tokenBudget: autoeditsProviderConfig.tokenLimit, + }) - if (abortSignal?.aborted) { autoeditsOutputChannelLogger.logDebugIfVerbose( 'provideInlineCompletionItems', - 'client aborted after getPrediction' + 'Calculating context from contextMixer...' 
) + const requestId = autoeditAnalyticsLogger.createRequest({ + startedAt: performance.now(), + codeToReplaceData, + position, + docContext, + document, + payload: { + languageId: document.languageId, + model: autoeditsProviderConfig.model, + codeToRewrite, + triggerKind: autoeditTriggerKind.automatic, + }, + }) - autoeditAnalyticsLogger.markAsDiscarded({ + const { context, contextSummary } = await this.contextMixer.getContext({ + document, + position, + docContext, + maxChars: 32_000, + }) + autoeditAnalyticsLogger.markAsContextLoaded({ requestId, - discardReason: autoeditDiscardReason.clientAborted, + payload: { + contextSummary, + }, }) - return null - } + if (abortSignal.aborted) { + autoeditsOutputChannelLogger.logDebugIfVerbose( + 'provideInlineCompletionItems', + 'aborted in getContext' + ) + return null + } - if (initialPrediction === undefined || initialPrediction.length === 0) { autoeditsOutputChannelLogger.logDebugIfVerbose( 'provideInlineCompletionItems', - 'received empty prediction' + 'Calculating prompt from promptStrategy...' ) - - autoeditAnalyticsLogger.markAsDiscarded({ - requestId, - discardReason: autoeditDiscardReason.emptyPrediction, + const prompt = this.promptStrategy.getPromptForModelType({ + fileWithMarkerPrompt, + areaPrompt, + context, + tokenBudget: autoeditsProviderConfig.tokenLimit, + isChatModel: autoeditsProviderConfig.isChatModel, }) - return null - } - autoeditAnalyticsLogger.markAsLoaded({ - requestId, - prompt, - payload: { - source: autoeditSource.network, - isFuzzyMatch: false, - responseHeaders: {}, - prediction: initialPrediction, - }, - }) - autoeditsOutputChannelLogger.logDebug( - 'provideInlineCompletionItems', - `"${requestId}" ============= Response:\n${initialPrediction}\n` + - `============= Time Taken: ${getTimeNowInMillis() - start}ms` - ) - - const prediction = shrinkPredictionUntilSuffix({ - prediction: initialPrediction, - codeToReplaceData, - }) - - if (prediction === codeToRewrite) { autoeditsOutputChannelLogger.logDebugIfVerbose( 'provideInlineCompletionItems', - 'prediction equals to code to rewrite' + 'Calculating prediction from getPrediction...' 
) - autoeditAnalyticsLogger.markAsDiscarded({ - requestId, - discardReason: autoeditDiscardReason.predictionEqualsCodeToRewrite, + const initialPrediction = await this.getPrediction({ + document, + position, + prompt, + codeToReplaceData, }) - return null - } - const shouldFilterPredictionBasedRecentEdits = this.filterPrediction.shouldFilterPrediction({ - uri: document.uri, - prediction, - codeToRewrite, - }) + if (abortSignal?.aborted) { + autoeditsOutputChannelLogger.logDebugIfVerbose( + 'provideInlineCompletionItems', + 'client aborted after getPrediction' + ) - if (shouldFilterPredictionBasedRecentEdits) { - autoeditsOutputChannelLogger.logDebugIfVerbose( - 'provideInlineCompletionItems', - 'based on recent edits' - ) - autoeditAnalyticsLogger.markAsDiscarded({ - requestId, - discardReason: autoeditDiscardReason.recentEdits, - }) - return null - } + autoeditAnalyticsLogger.markAsDiscarded({ + requestId, + discardReason: autoeditDiscardReason.clientAborted, + }) + return null + } - const decorationInfo = getDecorationInfoFromPrediction(document, prediction, codeToReplaceData) + if (initialPrediction === undefined || initialPrediction.length === 0) { + autoeditsOutputChannelLogger.logDebugIfVerbose( + 'provideInlineCompletionItems', + 'received empty prediction' + ) - if ( - isPredictedTextAlreadyInSuffix({ - codeToRewrite, - decorationInfo, - suffix: codeToReplaceData.suffixInArea + codeToReplaceData.suffixAfterArea, + autoeditAnalyticsLogger.markAsDiscarded({ + requestId, + discardReason: autoeditDiscardReason.emptyPrediction, + }) + return null + } + + autoeditAnalyticsLogger.markAsLoaded({ + requestId, + prompt, + payload: { + source: autoeditSource.network, + isFuzzyMatch: false, + responseHeaders: {}, + prediction: initialPrediction, + }, }) - ) { - autoeditsOutputChannelLogger.logDebugIfVerbose( + autoeditsOutputChannelLogger.logDebug( 'provideInlineCompletionItems', - 'skip because the prediction equals to code to rewrite' + `"${requestId}" ============= Response:\n${initialPrediction}\n` + + `============= Time Taken: ${getTimeNowInMillis() - start}ms` ) - autoeditAnalyticsLogger.markAsDiscarded({ - requestId, - discardReason: autoeditDiscardReason.suffixOverlap, - }) - return null - } - const { inlineCompletionItems, updatedDecorationInfo, updatedPrediction } = - this.rendererManager.tryMakeInlineCompletions({ - requestId, - prediction, + const prediction = shrinkPredictionUntilSuffix({ + prediction: initialPrediction, codeToReplaceData, - document, - position, - docContext, - decorationInfo, }) - if (inlineCompletionItems === null && updatedDecorationInfo === null) { - autoeditsOutputChannelLogger.logDebugIfVerbose( - 'provideInlineCompletionItems', - 'no suggestion to render' - ) - autoeditAnalyticsLogger.markAsDiscarded({ - requestId, - discardReason: autoeditDiscardReason.emptyPredictionAfterInlineCompletionExtraction, + if (prediction === codeToRewrite) { + autoeditsOutputChannelLogger.logDebugIfVerbose( + 'provideInlineCompletionItems', + 'prediction equals to code to rewrite' + ) + autoeditAnalyticsLogger.markAsDiscarded({ + requestId, + discardReason: autoeditDiscardReason.predictionEqualsCodeToRewrite, + }) + return null + } + + const shouldFilterPredictionBasedRecentEdits = this.filterPrediction.shouldFilterPrediction({ + uri: document.uri, + prediction, + codeToRewrite, }) - return null - } - const editor = vscode.window.activeTextEditor - if (!editor || !areSameUriDocs(document, editor.document)) { - autoeditsOutputChannelLogger.logDebugIfVerbose( - 
'provideInlineCompletionItems', - 'no active editor' + if (shouldFilterPredictionBasedRecentEdits) { + autoeditsOutputChannelLogger.logDebugIfVerbose( + 'provideInlineCompletionItems', + 'based on recent edits' + ) + autoeditAnalyticsLogger.markAsDiscarded({ + requestId, + discardReason: autoeditDiscardReason.recentEdits, + }) + return null + } + + const decorationInfo = getDecorationInfoFromPrediction( + document, + prediction, + codeToReplaceData ) - autoeditAnalyticsLogger.markAsDiscarded({ + + if ( + isPredictedTextAlreadyInSuffix({ + codeToRewrite, + decorationInfo, + suffix: codeToReplaceData.suffixInArea + codeToReplaceData.suffixAfterArea, + }) + ) { + autoeditsOutputChannelLogger.logDebugIfVerbose( + 'provideInlineCompletionItems', + 'skip because the prediction equals to code to rewrite' + ) + autoeditAnalyticsLogger.markAsDiscarded({ + requestId, + discardReason: autoeditDiscardReason.suffixOverlap, + }) + return null + } + + const { inlineCompletionItems, updatedDecorationInfo, updatedPrediction } = + this.rendererManager.tryMakeInlineCompletions({ + requestId, + prediction, + codeToReplaceData, + document, + position, + docContext, + decorationInfo, + }) + + if (inlineCompletionItems === null && updatedDecorationInfo === null) { + autoeditsOutputChannelLogger.logDebugIfVerbose( + 'provideInlineCompletionItems', + 'no suggestion to render' + ) + autoeditAnalyticsLogger.markAsDiscarded({ + requestId, + discardReason: autoeditDiscardReason.emptyPredictionAfterInlineCompletionExtraction, + }) + return null + } + + const editor = vscode.window.activeTextEditor + if (!editor || !areSameUriDocs(document, editor.document)) { + autoeditsOutputChannelLogger.logDebugIfVerbose( + 'provideInlineCompletionItems', + 'no active editor' + ) + autoeditAnalyticsLogger.markAsDiscarded({ + requestId, + discardReason: autoeditDiscardReason.noActiveEditor, + }) + return null + } + + // Save metadata required for the agent API calls. + // `this.unstable_handleDidShowCompletionItem` can't receive anything apart from the `requestId` + // because the agent does not know anything about our internal state. + // We need to ensure all the relevant metadata can be retrieved from `requestId` only. + autoeditAnalyticsLogger.markAsPostProcessed({ requestId, - discardReason: autoeditDiscardReason.noActiveEditor, + prediction: updatedPrediction, + decorationInfo: updatedDecorationInfo, + inlineCompletionItems, }) - return null - } - // Save metadata required for the agent API calls. - // `this.unstable_handleDidShowCompletionItem` can't receive anything apart from the `requestId` - // because the agent does not know anything about our internal state. - // We need to ensure all the relevant metadata can be retrieved from `requestId` only. 
- autoeditAnalyticsLogger.markAsPostProcessed({ - requestId, - prediction: updatedPrediction, - decorationInfo: updatedDecorationInfo, - inlineCompletionItems, - }) + if (!isRunningInsideAgent()) { + // Since VS Code has no callback as to when a completion is shown, we assume + // that if we pass the above visibility tests, the completion is going to be + // rendered in the UI + await this.unstable_handleDidShowCompletionItem(requestId) + } - if (!isRunningInsideAgent()) { - // Since VS Code has no callback as to when a completion is shown, we assume - // that if we pass the above visibility tests, the completion is going to be - // rendered in the UI - await this.unstable_handleDidShowCompletionItem(requestId) - } + if (updatedDecorationInfo) { + await this.rendererManager.renderInlineDecorations(updatedDecorationInfo) + } - if (updatedDecorationInfo) { - await this.rendererManager.renderInlineDecorations(updatedDecorationInfo) - } + // The data structure returned to the agent's from the `autoedits/execute` calls. + // Note: this is subject to change later once we start working on the agent API. + const result: AutoeditsResult = { + items: inlineCompletionItems || [], + requestId, + prediction, + decorationInfo, + } - // The data structure returned to the agent's from the `autoedits/execute` calls. - // Note: this is subject to change later once we start working on the agent API. - const result: AutoeditsResult = { - items: inlineCompletionItems || [], - requestId, - prediction, - decorationInfo, - } + return result + } catch (error) { + const errorToReport = + error instanceof Error + ? error + : new Error(`provideInlineCompletionItems autoedit error: ${error}`) - return result + autoeditAnalyticsLogger.logError(errorToReport) + return null + } } /** diff --git a/vscode/src/autoedits/test-helpers.ts b/vscode/src/autoedits/test-helpers.ts index c31ee9848768..3473bbe8111a 100644 --- a/vscode/src/autoedits/test-helpers.ts +++ b/vscode/src/autoedits/test-helpers.ts @@ -33,12 +33,19 @@ export async function autoeditResultFor( prediction, token, provider: existingProvider, + getModelResponse, }: { prediction: string /** provide to reuse an existing provider instance */ provider?: AutoeditsProvider inlineCompletionContext?: vscode.InlineCompletionContext token?: vscode.CancellationToken + getModelResponse?: ( + url: string, + body: string, + apiKey: string, + customHeaders?: Record + ) => Promise } ): Promise<{ result: AutoeditsResult | null @@ -47,8 +54,7 @@ export async function autoeditResultFor( provider: AutoeditsProvider editBuilder: WorkspaceEdit }> { - // TODO: add a callback to verify `getModelResponse` arguments. - vi.spyOn(adapters, 'getModelResponse').mockImplementation(async (..._args: unknown[]) => { + const getModelResponseMock = async (...args: unknown[]) => { // Simulate response latency. vi.advanceTimersByTime(100) @@ -59,7 +65,10 @@ export async function autoeditResultFor( }, ], } - }) + } + + // TODO: add a callback to verify `getModelResponse` arguments. 
+ vi.spyOn(adapters, 'getModelResponse').mockImplementation(getModelResponse || getModelResponseMock) const editBuilder = new WorkspaceEdit() const { document, position } = documentAndPosition(textWithCursor) diff --git a/vscode/src/services/sentry/sentry.ts b/vscode/src/services/sentry/sentry.ts index ffb269c937f5..a8c6506e8222 100644 --- a/vscode/src/services/sentry/sentry.ts +++ b/vscode/src/services/sentry/sentry.ts @@ -92,11 +92,15 @@ export function shouldErrorBeReported(error: unknown, insideAgent: boolean): boo return false } + // TODO(valery): verify if the stack-substring condition is still applicable + // Reason: we don't have _any_ errors in Sentry which is suspicious. + // Original PR: https://github.com/sourcegraph/cody/pull/3540 + // // Attempt to silence errors from other extensions (if we're inside a VS Code extension, the // stack trace should include the extension name). - if (isError(error) && !insideAgent && !error.stack?.includes('sourcegraph.cody-ai')) { - return false - } + // if (isError(error) && !insideAgent && !error.stack?.includes('sourcegraph.cody-ai')) { + // return false + // } return true } From c15af6d6d4d40b680dcd15956881e04c431516f2 Mon Sep 17 00:00:00 2001 From: Valery Bugakov Date: Mon, 13 Jan 2025 11:14:05 +0900 Subject: [PATCH 2/3] feat(audoedit): add follow-up issue --- vscode/src/services/sentry/sentry.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/vscode/src/services/sentry/sentry.ts b/vscode/src/services/sentry/sentry.ts index a8c6506e8222..d2a293c0f042 100644 --- a/vscode/src/services/sentry/sentry.ts +++ b/vscode/src/services/sentry/sentry.ts @@ -95,6 +95,7 @@ export function shouldErrorBeReported(error: unknown, insideAgent: boolean): boo // TODO(valery): verify if the stack-substring condition is still applicable // Reason: we don't have _any_ errors in Sentry which is suspicious. // Original PR: https://github.com/sourcegraph/cody/pull/3540 + // Follow-up issue: https://linear.app/sourcegraph/issue/CODY-4673/telemetry-verify-error-filtering-conditions-after-the-change // // Attempt to silence errors from other extensions (if we're inside a VS Code extension, the // stack trace should include the extension name). From e15c5a599b2355f7062d013aae0a5d5dbb3557ef Mon Sep 17 00:00:00 2001 From: Valery Bugakov Date: Mon, 13 Jan 2025 19:45:02 +0900 Subject: [PATCH 3/3] chore(audoedit): cleanup --- vscode/src/services/sentry/sentry.ts | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/vscode/src/services/sentry/sentry.ts b/vscode/src/services/sentry/sentry.ts index d2a293c0f042..bcf7d5ce5113 100644 --- a/vscode/src/services/sentry/sentry.ts +++ b/vscode/src/services/sentry/sentry.ts @@ -92,16 +92,10 @@ export function shouldErrorBeReported(error: unknown, insideAgent: boolean): boo return false } - // TODO(valery): verify if the stack-substring condition is still applicable - // Reason: we don't have _any_ errors in Sentry which is suspicious. - // Original PR: https://github.com/sourcegraph/cody/pull/3540 + // TODO(valery): verify if the stack-substring condition is still applicable. + // Condition we used: `!error.stack?.includes('sourcegraph.cody-ai')` + // Reason: currently, we don't have _any_ errors in Sentry which is suspicious. 
+ // Original PR that introduced the condition: https://github.com/sourcegraph/cody/pull/3540 // Follow-up issue: https://linear.app/sourcegraph/issue/CODY-4673/telemetry-verify-error-filtering-conditions-after-the-change - // - // Attempt to silence errors from other extensions (if we're inside a VS Code extension, the - // stack trace should include the extension name). - // if (isError(error) && !insideAgent && !error.stack?.includes('sourcegraph.cody-ai')) { - // return false - // } - return true }
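
Note on the pattern these patches adopt: PATCH 1/3 routes provider failures through a single try/catch at the `provideInlineCompletionItems` boundary, normalizes non-`Error` throwables, and reports them, while the `sentry.ts` changes reconsider the stack-substring filter. The sketch below is a minimal, self-contained illustration of that pattern, not the Cody source: `provideSafely`, `reportToSentry`, and `recordTelemetryError` are hypothetical stand-ins for `autoeditAnalyticsLogger.logError` and the real telemetry/Sentry sinks; the `shouldErrorBeReported` condition mirrors the stack-substring check discussed in `sentry.ts`, and the error-normalization line is taken from the catch block added in this patch.

```ts
// Standalone sketch (not the Cody source): normalize unknown throwables at a
// provider boundary, decide whether to report them, and degrade to `null`.

function isError(value: unknown): value is Error {
    return value instanceof Error
}

// Mirrors the condition commented out in sentry.ts: outside the agent, only report
// errors whose stack mentions our extension id.
function shouldErrorBeReported(error: unknown, insideAgent: boolean): boolean {
    if (isError(error) && !insideAgent && !error.stack?.includes('sourcegraph.cody-ai')) {
        return false
    }
    return true
}

// Placeholder sinks; in the extension these would be Sentry's captureException and
// the telemetry recorder behind autoeditAnalyticsLogger.logError.
function reportToSentry(error: Error): void {
    console.error('[sentry]', error.message)
}
function recordTelemetryError(error: Error): void {
    console.error('[telemetry]', error.message)
}

// Boundary wrapper: convert non-Error throwables into Error, report once, and
// return null so the completion provider never crashes the caller.
async function provideSafely<T>(operation: () => Promise<T>): Promise<T | null> {
    try {
        return await operation()
    } catch (error) {
        const errorToReport =
            error instanceof Error
                ? error
                : new Error(`provideInlineCompletionItems autoedit error: ${error}`)

        recordTelemetryError(errorToReport)
        if (shouldErrorBeReported(errorToReport, /* insideAgent */ false)) {
            reportToSentry(errorToReport)
        }
        return null
    }
}

// Usage: a failing "model request" is reported and converted into a null result.
async function main(): Promise<void> {
    const result = await provideSafely(async () => {
        throw new Error('test-error')
    })
    console.log('result:', result) // -> null
}

void main()
```

In the extension itself the reporting is expected to fan out to both the telemetry recorder and Sentry via `autoeditAnalyticsLogger.logError`, which is what the new test in `autoedits-provider.test.ts` asserts by spying on `sentryCore.captureException` and on the `cody.autoedit` / `error` telemetry event.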