// source dump of claude code
// at main, 605 lines, 20 kB (view raw)
1import { feature } from 'bun:bundle' 2import type { 3 Base64ImageSource, 4 ContentBlockParam, 5 ImageBlockParam, 6} from '@anthropic-ai/sdk/resources/messages.mjs' 7import { randomUUID } from 'crypto' 8import type { QuerySource } from 'src/constants/querySource.js' 9import { logEvent } from 'src/services/analytics/index.js' 10import { getContentText } from 'src/utils/messages.js' 11import { 12 findCommand, 13 getCommandName, 14 isBridgeSafeCommand, 15 type LocalJSXCommandContext, 16} from '../../commands.js' 17import type { CanUseToolFn } from '../../hooks/useCanUseTool.js' 18import type { IDESelection } from '../../hooks/useIdeSelection.js' 19import type { SetToolJSXFn, ToolUseContext } from '../../Tool.js' 20import type { 21 AssistantMessage, 22 AttachmentMessage, 23 Message, 24 ProgressMessage, 25 SystemMessage, 26 UserMessage, 27} from '../../types/message.js' 28import type { PermissionMode } from '../../types/permissions.js' 29import { 30 isValidImagePaste, 31 type PromptInputMode, 32} from '../../types/textInputTypes.js' 33import { 34 type AgentMentionAttachment, 35 createAttachmentMessage, 36 getAttachmentMessages, 37} from '../attachments.js' 38import type { PastedContent } from '../config.js' 39import type { EffortValue } from '../effort.js' 40import { toArray } from '../generators.js' 41import { 42 executeUserPromptSubmitHooks, 43 getUserPromptSubmitHookBlockingMessage, 44} from '../hooks.js' 45import { 46 createImageMetadataText, 47 maybeResizeAndDownsampleImageBlock, 48} from '../imageResizer.js' 49import { storeImages } from '../imageStore.js' 50import { 51 createCommandInputMessage, 52 createSystemMessage, 53 createUserMessage, 54} from '../messages.js' 55import { queryCheckpoint } from '../queryProfiler.js' 56import { parseSlashCommand } from '../slashCommandParsing.js' 57import { 58 hasUltraplanKeyword, 59 replaceUltraplanKeyword, 60} from '../ultraplan/keyword.js' 61import { processTextPrompt } from './processTextPrompt.js' 62export type 
ProcessUserInputContext = ToolUseContext & LocalJSXCommandContext 63 64export type ProcessUserInputBaseResult = { 65 messages: ( 66 | UserMessage 67 | AssistantMessage 68 | AttachmentMessage 69 | SystemMessage 70 | ProgressMessage 71 )[] 72 shouldQuery: boolean 73 allowedTools?: string[] 74 model?: string 75 effort?: EffortValue 76 // Output text for non-interactive mode (e.g., forked commands) 77 // When set, this is used as the result in -p mode instead of empty string 78 resultText?: string 79 // When set, prefills or submits the next input after command completes 80 // Used by /discover to chain into the selected feature's command 81 nextInput?: string 82 submitNextInput?: boolean 83} 84 85export async function processUserInput({ 86 input, 87 preExpansionInput, 88 mode, 89 setToolJSX, 90 context, 91 pastedContents, 92 ideSelection, 93 messages, 94 setUserInputOnProcessing, 95 uuid, 96 isAlreadyProcessing, 97 querySource, 98 canUseTool, 99 skipSlashCommands, 100 bridgeOrigin, 101 isMeta, 102 skipAttachments, 103}: { 104 input: string | Array<ContentBlockParam> 105 /** 106 * Input before [Pasted text #N] expansion. Used for ultraplan keyword 107 * detection so pasted content containing the word cannot trigger. Falls 108 * back to the string `input` when unset. 109 */ 110 preExpansionInput?: string 111 mode: PromptInputMode 112 setToolJSX: SetToolJSXFn 113 context: ProcessUserInputContext 114 pastedContents?: Record<number, PastedContent> 115 ideSelection?: IDESelection 116 messages?: Message[] 117 setUserInputOnProcessing?: (prompt?: string) => void 118 uuid?: string 119 isAlreadyProcessing?: boolean 120 querySource?: QuerySource 121 canUseTool?: CanUseToolFn 122 /** 123 * When true, input starting with `/` is treated as plain text. 124 * Used for remotely-received messages (bridge/CCR) that should not 125 * trigger local slash commands or skills. 
126 */ 127 skipSlashCommands?: boolean 128 /** 129 * When true, slash commands matching isBridgeSafeCommand() execute even 130 * though skipSlashCommands is set. See QueuedCommand.bridgeOrigin. 131 */ 132 bridgeOrigin?: boolean 133 /** 134 * When true, the resulting UserMessage gets `isMeta: true` (user-hidden, 135 * model-visible). Propagated from `QueuedCommand.isMeta` for queued 136 * system-generated prompts. 137 */ 138 isMeta?: boolean 139 skipAttachments?: boolean 140}): Promise<ProcessUserInputBaseResult> { 141 const inputString = typeof input === 'string' ? input : null 142 // Immediately show the user input prompt while we are still processing the input. 143 // Skip for isMeta (system-generated prompts like scheduled tasks) — those 144 // should run invisibly. 145 if (mode === 'prompt' && inputString !== null && !isMeta) { 146 setUserInputOnProcessing?.(inputString) 147 } 148 149 queryCheckpoint('query_process_user_input_base_start') 150 151 const appState = context.getAppState() 152 153 const result = await processUserInputBase( 154 input, 155 mode, 156 setToolJSX, 157 context, 158 pastedContents, 159 ideSelection, 160 messages, 161 uuid, 162 isAlreadyProcessing, 163 querySource, 164 canUseTool, 165 appState.toolPermissionContext.mode, 166 skipSlashCommands, 167 bridgeOrigin, 168 isMeta, 169 skipAttachments, 170 preExpansionInput, 171 ) 172 queryCheckpoint('query_process_user_input_base_end') 173 174 if (!result.shouldQuery) { 175 return result 176 } 177 178 // Execute UserPromptSubmit hooks and handle blocking 179 queryCheckpoint('query_hooks_start') 180 const inputMessage = getContentText(input) || '' 181 182 for await (const hookResult of executeUserPromptSubmitHooks( 183 inputMessage, 184 appState.toolPermissionContext.mode, 185 context, 186 context.requestPrompt, 187 )) { 188 // We only care about the result 189 if (hookResult.message?.type === 'progress') { 190 continue 191 } 192 193 // Return only a system-level error message, erasing the original 
user input 194 if (hookResult.blockingError) { 195 const blockingMessage = getUserPromptSubmitHookBlockingMessage( 196 hookResult.blockingError, 197 ) 198 return { 199 messages: [ 200 // TODO: Make this an attachment message 201 createSystemMessage( 202 `${blockingMessage}\n\nOriginal prompt: ${input}`, 203 'warning', 204 ), 205 ], 206 shouldQuery: false, 207 allowedTools: result.allowedTools, 208 } 209 } 210 211 // If preventContinuation is set, stop processing but keep the original 212 // prompt in context. 213 if (hookResult.preventContinuation) { 214 const message = hookResult.stopReason 215 ? `Operation stopped by hook: ${hookResult.stopReason}` 216 : 'Operation stopped by hook' 217 result.messages.push( 218 createUserMessage({ 219 content: message, 220 }), 221 ) 222 result.shouldQuery = false 223 return result 224 } 225 226 // Collect additional contexts 227 if ( 228 hookResult.additionalContexts && 229 hookResult.additionalContexts.length > 0 230 ) { 231 result.messages.push( 232 createAttachmentMessage({ 233 type: 'hook_additional_context', 234 content: hookResult.additionalContexts.map(applyTruncation), 235 hookName: 'UserPromptSubmit', 236 toolUseID: `hook-${randomUUID()}`, 237 hookEvent: 'UserPromptSubmit', 238 }), 239 ) 240 } 241 242 // TODO: Clean this up 243 if (hookResult.message) { 244 switch (hookResult.message.attachment.type) { 245 case 'hook_success': 246 if (!hookResult.message.attachment.content) { 247 // Skip if there is no content 248 break 249 } 250 result.messages.push({ 251 ...hookResult.message, 252 attachment: { 253 ...hookResult.message.attachment, 254 content: applyTruncation(hookResult.message.attachment.content), 255 }, 256 }) 257 break 258 default: 259 result.messages.push(hookResult.message) 260 break 261 } 262 } 263 } 264 queryCheckpoint('query_hooks_end') 265 266 // Happy path: onQuery will clear userInputOnProcessing via startTransition 267 // so it resolves in the same frame as deferredMessages (no flicker gap). 
268 // Error paths are handled by handlePromptSubmit's finally block. 269 return result 270} 271 272const MAX_HOOK_OUTPUT_LENGTH = 10000 273 274function applyTruncation(content: string): string { 275 if (content.length > MAX_HOOK_OUTPUT_LENGTH) { 276 return `${content.substring(0, MAX_HOOK_OUTPUT_LENGTH)}… [output truncated - exceeded ${MAX_HOOK_OUTPUT_LENGTH} characters]` 277 } 278 return content 279} 280 281async function processUserInputBase( 282 input: string | Array<ContentBlockParam>, 283 mode: PromptInputMode, 284 setToolJSX: SetToolJSXFn, 285 context: ProcessUserInputContext, 286 pastedContents?: Record<number, PastedContent>, 287 ideSelection?: IDESelection, 288 messages?: Message[], 289 uuid?: string, 290 isAlreadyProcessing?: boolean, 291 querySource?: QuerySource, 292 canUseTool?: CanUseToolFn, 293 permissionMode?: PermissionMode, 294 skipSlashCommands?: boolean, 295 bridgeOrigin?: boolean, 296 isMeta?: boolean, 297 skipAttachments?: boolean, 298 preExpansionInput?: string, 299): Promise<ProcessUserInputBaseResult> { 300 let inputString: string | null = null 301 let precedingInputBlocks: ContentBlockParam[] = [] 302 303 // Collect image metadata texts for isMeta message 304 const imageMetadataTexts: string[] = [] 305 306 // Normalized view of `input` with image blocks resized. For string input 307 // this is just `input`; for array input it's the processed blocks. We pass 308 // this (not raw `input`) to processTextPrompt so resized/normalized image 309 // blocks actually reach the API — otherwise the resize work above is 310 // discarded for the regular prompt path. Also normalizes bridge inputs 311 // where iOS may send `mediaType` instead of `media_type` (mobile-apps#5825). 
312 let normalizedInput: string | ContentBlockParam[] = input 313 314 if (typeof input === 'string') { 315 inputString = input 316 } else if (input.length > 0) { 317 queryCheckpoint('query_image_processing_start') 318 const processedBlocks: ContentBlockParam[] = [] 319 for (const block of input) { 320 if (block.type === 'image') { 321 const resized = await maybeResizeAndDownsampleImageBlock(block) 322 // Collect image metadata for isMeta message 323 if (resized.dimensions) { 324 const metadataText = createImageMetadataText(resized.dimensions) 325 if (metadataText) { 326 imageMetadataTexts.push(metadataText) 327 } 328 } 329 processedBlocks.push(resized.block) 330 } else { 331 processedBlocks.push(block) 332 } 333 } 334 normalizedInput = processedBlocks 335 queryCheckpoint('query_image_processing_end') 336 // Extract the input string from the last content block if it is text, 337 // and keep track of the preceding content blocks 338 const lastBlock = processedBlocks[processedBlocks.length - 1] 339 if (lastBlock?.type === 'text') { 340 inputString = lastBlock.text 341 precedingInputBlocks = processedBlocks.slice(0, -1) 342 } else { 343 precedingInputBlocks = processedBlocks 344 } 345 } 346 347 if (inputString === null && mode !== 'prompt') { 348 throw new Error(`Mode: ${mode} requires a string input.`) 349 } 350 351 // Extract and convert image content to content blocks early 352 // Keep track of IDs in order for message storage 353 const imageContents = pastedContents 354 ? Object.values(pastedContents).filter(isValidImagePaste) 355 : [] 356 const imagePasteIds = imageContents.map(img => img.id) 357 358 // Store images to disk so Claude can reference the path in context 359 // (for manipulation with CLI tools, uploading to PRs, etc.) 360 const storedImagePaths = pastedContents 361 ? 
await storeImages(pastedContents) 362 : new Map<number, string>() 363 364 // Resize pasted images to ensure they fit within API limits (parallel processing) 365 queryCheckpoint('query_pasted_image_processing_start') 366 const imageProcessingResults = await Promise.all( 367 imageContents.map(async pastedImage => { 368 const imageBlock: ImageBlockParam = { 369 type: 'image', 370 source: { 371 type: 'base64', 372 media_type: (pastedImage.mediaType || 373 'image/png') as Base64ImageSource['media_type'], 374 data: pastedImage.content, 375 }, 376 } 377 logEvent('tengu_pasted_image_resize_attempt', { 378 original_size_bytes: pastedImage.content.length, 379 }) 380 const resized = await maybeResizeAndDownsampleImageBlock(imageBlock) 381 return { 382 resized, 383 originalDimensions: pastedImage.dimensions, 384 sourcePath: 385 pastedImage.sourcePath ?? storedImagePaths.get(pastedImage.id), 386 } 387 }), 388 ) 389 // Collect results preserving order 390 const imageContentBlocks: ContentBlockParam[] = [] 391 for (const { 392 resized, 393 originalDimensions, 394 sourcePath, 395 } of imageProcessingResults) { 396 // Collect image metadata for isMeta message (prefer resized dimensions) 397 if (resized.dimensions) { 398 const metadataText = createImageMetadataText( 399 resized.dimensions, 400 sourcePath, 401 ) 402 if (metadataText) { 403 imageMetadataTexts.push(metadataText) 404 } 405 } else if (originalDimensions) { 406 // Fall back to original dimensions if resize didn't provide them 407 const metadataText = createImageMetadataText( 408 originalDimensions, 409 sourcePath, 410 ) 411 if (metadataText) { 412 imageMetadataTexts.push(metadataText) 413 } 414 } else if (sourcePath) { 415 // If we have a source path but no dimensions, still add source info 416 imageMetadataTexts.push(`[Image source: ${sourcePath}]`) 417 } 418 imageContentBlocks.push(resized.block) 419 } 420 queryCheckpoint('query_pasted_image_processing_end') 421 422 // Bridge-safe slash command override: mobile/web 
clients set bridgeOrigin 423 // with skipSlashCommands still true (defense-in-depth against exit words and 424 // immediate-command fast paths). Resolve the command here — if it passes 425 // isBridgeSafeCommand, clear the skip so the gate below opens. If it's a 426 // known-but-unsafe command (local-jsx UI or terminal-only), short-circuit 427 // with a helpful message rather than letting the model see raw "/config". 428 let effectiveSkipSlash = skipSlashCommands 429 if (bridgeOrigin && inputString !== null && inputString.startsWith('/')) { 430 const parsed = parseSlashCommand(inputString) 431 const cmd = parsed 432 ? findCommand(parsed.commandName, context.options.commands) 433 : undefined 434 if (cmd) { 435 if (isBridgeSafeCommand(cmd)) { 436 effectiveSkipSlash = false 437 } else { 438 const msg = `/${getCommandName(cmd)} isn't available over Remote Control.` 439 return { 440 messages: [ 441 createUserMessage({ content: inputString, uuid }), 442 createCommandInputMessage( 443 `<local-command-stdout>${msg}</local-command-stdout>`, 444 ), 445 ], 446 shouldQuery: false, 447 resultText: msg, 448 } 449 } 450 } 451 // Unknown /foo or unparseable — fall through to plain text, same as 452 // pre-#19134. A mobile user typing "/shrug" shouldn't see "Unknown skill". 453 } 454 455 // Ultraplan keyword — route through /ultraplan. Detect on the 456 // pre-expansion input so pasted content containing the word cannot 457 // trigger a CCR session; replace with "plan" in the expanded input so 458 // the CCR prompt receives paste contents and stays grammatical. See 459 // keyword.ts for the quote/path exclusions. Interactive prompt mode + 460 // non-slash-prefixed only: 461 // headless/print mode filters local-jsx commands out of context.options, 462 // so routing to /ultraplan there yields "Unknown skill" — and there's no 463 // rainbow animation in print mode anyway. 
464 // Runs before attachment extraction so this path matches the slash-command 465 // path below (no await between setUserInputOnProcessing and setAppState — 466 // React batches both into one render, no flash). 467 if ( 468 feature('ULTRAPLAN') && 469 mode === 'prompt' && 470 !context.options.isNonInteractiveSession && 471 inputString !== null && 472 !effectiveSkipSlash && 473 !inputString.startsWith('/') && 474 !context.getAppState().ultraplanSessionUrl && 475 !context.getAppState().ultraplanLaunching && 476 hasUltraplanKeyword(preExpansionInput ?? inputString) 477 ) { 478 logEvent('tengu_ultraplan_keyword', {}) 479 const rewritten = replaceUltraplanKeyword(inputString).trim() 480 const { processSlashCommand } = await import('./processSlashCommand.js') 481 const slashResult = await processSlashCommand( 482 `/ultraplan ${rewritten}`, 483 precedingInputBlocks, 484 imageContentBlocks, 485 [], 486 context, 487 setToolJSX, 488 uuid, 489 isAlreadyProcessing, 490 canUseTool, 491 ) 492 return addImageMetadataMessage(slashResult, imageMetadataTexts) 493 } 494 495 // For slash commands, attachments will be extracted within getMessagesForSlashCommand 496 const shouldExtractAttachments = 497 !skipAttachments && 498 inputString !== null && 499 (mode !== 'prompt' || effectiveSkipSlash || !inputString.startsWith('/')) 500 501 queryCheckpoint('query_attachment_loading_start') 502 const attachmentMessages = shouldExtractAttachments 503 ? await toArray( 504 getAttachmentMessages( 505 inputString, 506 context, 507 ideSelection ?? 
null, 508 [], // queuedCommands - handled by query.ts for mid-turn attachments 509 messages, 510 querySource, 511 ), 512 ) 513 : [] 514 queryCheckpoint('query_attachment_loading_end') 515 516 // Bash commands 517 if (inputString !== null && mode === 'bash') { 518 const { processBashCommand } = await import('./processBashCommand.js') 519 return addImageMetadataMessage( 520 await processBashCommand( 521 inputString, 522 precedingInputBlocks, 523 attachmentMessages, 524 context, 525 setToolJSX, 526 ), 527 imageMetadataTexts, 528 ) 529 } 530 531 // Slash commands 532 // Skip for remote bridge messages — input from CCR clients is plain text 533 if ( 534 inputString !== null && 535 !effectiveSkipSlash && 536 inputString.startsWith('/') 537 ) { 538 const { processSlashCommand } = await import('./processSlashCommand.js') 539 const slashResult = await processSlashCommand( 540 inputString, 541 precedingInputBlocks, 542 imageContentBlocks, 543 attachmentMessages, 544 context, 545 setToolJSX, 546 uuid, 547 isAlreadyProcessing, 548 canUseTool, 549 ) 550 return addImageMetadataMessage(slashResult, imageMetadataTexts) 551 } 552 553 // Log agent mention queries for analysis 554 if (inputString !== null && mode === 'prompt') { 555 const trimmedInput = inputString.trim() 556 557 const agentMention = attachmentMessages.find( 558 (m): m is AttachmentMessage<AgentMentionAttachment> => 559 m.attachment.type === 'agent_mention', 560 ) 561 562 if (agentMention) { 563 const agentMentionString = `@agent-${agentMention.attachment.agentType}` 564 const isSubagentOnly = trimmedInput === agentMentionString 565 const isPrefix = 566 trimmedInput.startsWith(agentMentionString) && !isSubagentOnly 567 568 // Log whenever users use @agent-<name> syntax 569 logEvent('tengu_subagent_at_mention', { 570 is_subagent_only: isSubagentOnly, 571 is_prefix: isPrefix, 572 }) 573 } 574 } 575 576 // Regular user prompt 577 return addImageMetadataMessage( 578 processTextPrompt( 579 normalizedInput, 580 
imageContentBlocks, 581 imagePasteIds, 582 attachmentMessages, 583 uuid, 584 permissionMode, 585 isMeta, 586 ), 587 imageMetadataTexts, 588 ) 589} 590 591// Adds image metadata texts as isMeta message to result 592function addImageMetadataMessage( 593 result: ProcessUserInputBaseResult, 594 imageMetadataTexts: string[], 595): ProcessUserInputBaseResult { 596 if (imageMetadataTexts.length > 0) { 597 result.messages.push( 598 createUserMessage({ 599 content: imageMetadataTexts.map(text => ({ type: 'text', text })), 600 isMeta: true, 601 }), 602 ) 603 } 604 return result 605}