// Image resize/compression utilities (reconstructed from a collapsed "view raw" dump; original file ~880 lines, 27 kB).
1import type { 2 Base64ImageSource, 3 ImageBlockParam, 4} from '@anthropic-ai/sdk/resources/messages.mjs' 5import { 6 API_IMAGE_MAX_BASE64_SIZE, 7 IMAGE_MAX_HEIGHT, 8 IMAGE_MAX_WIDTH, 9 IMAGE_TARGET_RAW_SIZE, 10} from '../constants/apiLimits.js' 11import { logEvent } from '../services/analytics/index.js' 12import { 13 getImageProcessor, 14 type SharpFunction, 15 type SharpInstance, 16} from '../tools/FileReadTool/imageProcessor.js' 17import { logForDebugging } from './debug.js' 18import { errorMessage } from './errors.js' 19import { formatFileSize } from './format.js' 20import { logError } from './log.js' 21 22type ImageMediaType = 'image/png' | 'image/jpeg' | 'image/gif' | 'image/webp' 23 24// Error type constants for analytics (numeric to comply with logEvent restrictions) 25const ERROR_TYPE_MODULE_LOAD = 1 26const ERROR_TYPE_PROCESSING = 2 27const ERROR_TYPE_UNKNOWN = 3 28const ERROR_TYPE_PIXEL_LIMIT = 4 29const ERROR_TYPE_MEMORY = 5 30const ERROR_TYPE_TIMEOUT = 6 31const ERROR_TYPE_VIPS = 7 32const ERROR_TYPE_PERMISSION = 8 33 34/** 35 * Error thrown when image resizing fails and the image exceeds the API limit. 36 */ 37export class ImageResizeError extends Error { 38 constructor(message: string) { 39 super(message) 40 this.name = 'ImageResizeError' 41 } 42} 43 44/** 45 * Classifies image processing errors for analytics. 46 * 47 * Uses error codes when available (Node.js module errors), falls back to 48 * message matching for libraries like sharp that don't expose error codes. 
49 */ 50function classifyImageError(error: unknown): number { 51 // Check for Node.js error codes first (more reliable than string matching) 52 if (error instanceof Error) { 53 const errorWithCode = error as Error & { code?: string } 54 if ( 55 errorWithCode.code === 'MODULE_NOT_FOUND' || 56 errorWithCode.code === 'ERR_MODULE_NOT_FOUND' || 57 errorWithCode.code === 'ERR_DLOPEN_FAILED' 58 ) { 59 return ERROR_TYPE_MODULE_LOAD 60 } 61 if (errorWithCode.code === 'EACCES' || errorWithCode.code === 'EPERM') { 62 return ERROR_TYPE_PERMISSION 63 } 64 if (errorWithCode.code === 'ENOMEM') { 65 return ERROR_TYPE_MEMORY 66 } 67 } 68 69 // Fall back to message matching for errors without codes 70 // Note: sharp doesn't expose error codes, so we must match on messages 71 const message = errorMessage(error) 72 73 // Module loading errors from our native wrapper 74 if (message.includes('Native image processor module not available')) { 75 return ERROR_TYPE_MODULE_LOAD 76 } 77 78 // Sharp/vips processing errors (format detection, corrupt data, etc.) 
79 if ( 80 message.includes('unsupported image format') || 81 message.includes('Input buffer') || 82 message.includes('Input file is missing') || 83 message.includes('Input file has corrupt header') || 84 message.includes('corrupt header') || 85 message.includes('corrupt image') || 86 message.includes('premature end') || 87 message.includes('zlib: data error') || 88 message.includes('zero width') || 89 message.includes('zero height') 90 ) { 91 return ERROR_TYPE_PROCESSING 92 } 93 94 // Pixel/dimension limit errors from sharp/vips 95 if ( 96 message.includes('pixel limit') || 97 message.includes('too many pixels') || 98 message.includes('exceeds pixel') || 99 message.includes('image dimensions') 100 ) { 101 return ERROR_TYPE_PIXEL_LIMIT 102 } 103 104 // Memory allocation failures 105 if ( 106 message.includes('out of memory') || 107 message.includes('Cannot allocate') || 108 message.includes('memory allocation') 109 ) { 110 return ERROR_TYPE_MEMORY 111 } 112 113 // Timeout errors 114 if (message.includes('timeout') || message.includes('timed out')) { 115 return ERROR_TYPE_TIMEOUT 116 } 117 118 // Vips-specific errors (VipsJpeg, VipsPng, VipsWebp, etc.) 119 if (message.includes('Vips')) { 120 return ERROR_TYPE_VIPS 121 } 122 123 return ERROR_TYPE_UNKNOWN 124} 125 126/** 127 * Computes a simple numeric hash of a string for analytics grouping. 128 * Uses djb2 algorithm, returning a 32-bit unsigned integer. 
129 */ 130function hashString(str: string): number { 131 let hash = 5381 132 for (let i = 0; i < str.length; i++) { 133 hash = ((hash << 5) + hash + str.charCodeAt(i)) | 0 134 } 135 return hash >>> 0 136} 137 138export type ImageDimensions = { 139 originalWidth?: number 140 originalHeight?: number 141 displayWidth?: number 142 displayHeight?: number 143} 144 145export interface ResizeResult { 146 buffer: Buffer 147 mediaType: string 148 dimensions?: ImageDimensions 149} 150 151interface ImageCompressionContext { 152 imageBuffer: Buffer 153 metadata: { width?: number; height?: number; format?: string } 154 format: string 155 maxBytes: number 156 originalSize: number 157} 158 159interface CompressedImageResult { 160 base64: string 161 mediaType: Base64ImageSource['media_type'] 162 originalSize: number 163} 164 165/** 166 * Extracted from FileReadTool's readImage function 167 * Resizes image buffer to meet size and dimension constraints 168 */ 169export async function maybeResizeAndDownsampleImageBuffer( 170 imageBuffer: Buffer, 171 originalSize: number, 172 ext: string, 173): Promise<ResizeResult> { 174 if (imageBuffer.length === 0) { 175 // Empty buffer would fall through the catch block below (sharp throws 176 // "Unable to determine image format"), and the fallback's size check 177 // `0 ≤ 5MB` would pass it through, yielding an empty base64 string 178 // that the API rejects with `image cannot be empty`. 179 throw new ImageResizeError('Image file is empty (0 bytes)') 180 } 181 try { 182 const sharp = await getImageProcessor() 183 const image = sharp(imageBuffer) 184 const metadata = await image.metadata() 185 186 const mediaType = metadata.format ?? ext 187 // Normalize "jpg" to "jpeg" for media type compatibility 188 const normalizedMediaType = mediaType === 'jpg' ? 
'jpeg' : mediaType 189 190 // If dimensions aren't available from metadata 191 if (!metadata.width || !metadata.height) { 192 if (originalSize > IMAGE_TARGET_RAW_SIZE) { 193 // Create fresh sharp instance for compression 194 const compressedBuffer = await sharp(imageBuffer) 195 .jpeg({ quality: 80 }) 196 .toBuffer() 197 return { buffer: compressedBuffer, mediaType: 'jpeg' } 198 } 199 // Return without dimensions if we can't determine them 200 return { buffer: imageBuffer, mediaType: normalizedMediaType } 201 } 202 203 // Store original dimensions (guaranteed to be defined here) 204 const originalWidth = metadata.width 205 const originalHeight = metadata.height 206 207 // Calculate dimensions while maintaining aspect ratio 208 let width = originalWidth 209 let height = originalHeight 210 211 // Check if the original file just works 212 if ( 213 originalSize <= IMAGE_TARGET_RAW_SIZE && 214 width <= IMAGE_MAX_WIDTH && 215 height <= IMAGE_MAX_HEIGHT 216 ) { 217 return { 218 buffer: imageBuffer, 219 mediaType: normalizedMediaType, 220 dimensions: { 221 originalWidth, 222 originalHeight, 223 displayWidth: width, 224 displayHeight: height, 225 }, 226 } 227 } 228 229 const needsDimensionResize = 230 width > IMAGE_MAX_WIDTH || height > IMAGE_MAX_HEIGHT 231 const isPng = normalizedMediaType === 'png' 232 233 // If dimensions are within limits but file is too large, try compression first 234 // This preserves full resolution when possible 235 if (!needsDimensionResize && originalSize > IMAGE_TARGET_RAW_SIZE) { 236 // For PNGs, try PNG compression first to preserve transparency 237 if (isPng) { 238 // Create fresh sharp instance for each compression attempt 239 const pngCompressed = await sharp(imageBuffer) 240 .png({ compressionLevel: 9, palette: true }) 241 .toBuffer() 242 if (pngCompressed.length <= IMAGE_TARGET_RAW_SIZE) { 243 return { 244 buffer: pngCompressed, 245 mediaType: 'png', 246 dimensions: { 247 originalWidth, 248 originalHeight, 249 displayWidth: width, 250 
displayHeight: height, 251 }, 252 } 253 } 254 } 255 // Try JPEG compression (lossy but much smaller) 256 for (const quality of [80, 60, 40, 20]) { 257 // Create fresh sharp instance for each attempt 258 const compressedBuffer = await sharp(imageBuffer) 259 .jpeg({ quality }) 260 .toBuffer() 261 if (compressedBuffer.length <= IMAGE_TARGET_RAW_SIZE) { 262 return { 263 buffer: compressedBuffer, 264 mediaType: 'jpeg', 265 dimensions: { 266 originalWidth, 267 originalHeight, 268 displayWidth: width, 269 displayHeight: height, 270 }, 271 } 272 } 273 } 274 // Quality reduction alone wasn't enough, fall through to resize 275 } 276 277 // Constrain dimensions if needed 278 if (width > IMAGE_MAX_WIDTH) { 279 height = Math.round((height * IMAGE_MAX_WIDTH) / width) 280 width = IMAGE_MAX_WIDTH 281 } 282 283 if (height > IMAGE_MAX_HEIGHT) { 284 width = Math.round((width * IMAGE_MAX_HEIGHT) / height) 285 height = IMAGE_MAX_HEIGHT 286 } 287 288 // IMPORTANT: Always create fresh sharp(imageBuffer) instances for each operation. 289 // The native image-processor-napi module doesn't properly apply format conversions 290 // when reusing a sharp instance after calling toBuffer(). This caused a bug where 291 // all compression attempts (PNG, JPEG at various qualities) returned identical sizes. 
292 logForDebugging(`Resizing to ${width}x${height}`) 293 const resizedImageBuffer = await sharp(imageBuffer) 294 .resize(width, height, { 295 fit: 'inside', 296 withoutEnlargement: true, 297 }) 298 .toBuffer() 299 300 // If still too large after resize, try compression 301 if (resizedImageBuffer.length > IMAGE_TARGET_RAW_SIZE) { 302 // For PNGs, try PNG compression first to preserve transparency 303 if (isPng) { 304 const pngCompressed = await sharp(imageBuffer) 305 .resize(width, height, { 306 fit: 'inside', 307 withoutEnlargement: true, 308 }) 309 .png({ compressionLevel: 9, palette: true }) 310 .toBuffer() 311 if (pngCompressed.length <= IMAGE_TARGET_RAW_SIZE) { 312 return { 313 buffer: pngCompressed, 314 mediaType: 'png', 315 dimensions: { 316 originalWidth, 317 originalHeight, 318 displayWidth: width, 319 displayHeight: height, 320 }, 321 } 322 } 323 } 324 325 // Try JPEG with progressively lower quality 326 for (const quality of [80, 60, 40, 20]) { 327 const compressedBuffer = await sharp(imageBuffer) 328 .resize(width, height, { 329 fit: 'inside', 330 withoutEnlargement: true, 331 }) 332 .jpeg({ quality }) 333 .toBuffer() 334 if (compressedBuffer.length <= IMAGE_TARGET_RAW_SIZE) { 335 return { 336 buffer: compressedBuffer, 337 mediaType: 'jpeg', 338 dimensions: { 339 originalWidth, 340 originalHeight, 341 displayWidth: width, 342 displayHeight: height, 343 }, 344 } 345 } 346 } 347 // If still too large, resize smaller and compress aggressively 348 const smallerWidth = Math.min(width, 1000) 349 const smallerHeight = Math.round( 350 (height * smallerWidth) / Math.max(width, 1), 351 ) 352 logForDebugging('Still too large, compressing with JPEG') 353 const compressedBuffer = await sharp(imageBuffer) 354 .resize(smallerWidth, smallerHeight, { 355 fit: 'inside', 356 withoutEnlargement: true, 357 }) 358 .jpeg({ quality: 20 }) 359 .toBuffer() 360 logForDebugging(`JPEG compressed buffer size: ${compressedBuffer.length}`) 361 return { 362 buffer: compressedBuffer, 
363 mediaType: 'jpeg', 364 dimensions: { 365 originalWidth, 366 originalHeight, 367 displayWidth: smallerWidth, 368 displayHeight: smallerHeight, 369 }, 370 } 371 } 372 373 return { 374 buffer: resizedImageBuffer, 375 mediaType: normalizedMediaType, 376 dimensions: { 377 originalWidth, 378 originalHeight, 379 displayWidth: width, 380 displayHeight: height, 381 }, 382 } 383 } catch (error) { 384 // Log the error and emit analytics event 385 logError(error as Error) 386 const errorType = classifyImageError(error) 387 const errorMsg = errorMessage(error) 388 logEvent('tengu_image_resize_failed', { 389 original_size_bytes: originalSize, 390 error_type: errorType, 391 error_message_hash: hashString(errorMsg), 392 }) 393 394 // Detect actual format from magic bytes instead of trusting extension 395 const detected = detectImageFormatFromBuffer(imageBuffer) 396 const normalizedExt = detected.slice(6) // Remove 'image/' prefix 397 398 // Calculate the base64 size (API limit is on base64-encoded length) 399 const base64Size = Math.ceil((originalSize * 4) / 3) 400 401 // Size-under-5MB does not imply dimensions-under-cap. Don't return the 402 // raw buffer if the PNG header says it's oversized — fall through to 403 // ImageResizeError instead. PNG sig is 8 bytes, IHDR dims at 16-24. 
404 const overDim = 405 imageBuffer.length >= 24 && 406 imageBuffer[0] === 0x89 && 407 imageBuffer[1] === 0x50 && 408 imageBuffer[2] === 0x4e && 409 imageBuffer[3] === 0x47 && 410 (imageBuffer.readUInt32BE(16) > IMAGE_MAX_WIDTH || 411 imageBuffer.readUInt32BE(20) > IMAGE_MAX_HEIGHT) 412 413 // If original image's base64 encoding is within API limit, allow it through uncompressed 414 if (base64Size <= API_IMAGE_MAX_BASE64_SIZE && !overDim) { 415 logEvent('tengu_image_resize_fallback', { 416 original_size_bytes: originalSize, 417 base64_size_bytes: base64Size, 418 error_type: errorType, 419 }) 420 return { buffer: imageBuffer, mediaType: normalizedExt } 421 } 422 423 // Image is too large and we failed to compress it - fail with user-friendly error 424 throw new ImageResizeError( 425 overDim 426 ? `Unable to resize image — dimensions exceed the ${IMAGE_MAX_WIDTH}x${IMAGE_MAX_HEIGHT}px limit and image processing failed. ` + 427 `Please resize the image to reduce its pixel dimensions.` 428 : `Unable to resize image (${formatFileSize(originalSize)} raw, ${formatFileSize(base64Size)} base64). ` + 429 `The image exceeds the 5MB API limit and compression failed. 
` + 430 `Please resize the image manually or use a smaller image.`, 431 ) 432 } 433} 434 435export interface ImageBlockWithDimensions { 436 block: ImageBlockParam 437 dimensions?: ImageDimensions 438} 439 440/** 441 * Resizes an image content block if needed 442 * Takes an image ImageBlockParam and returns a resized version if necessary 443 * Also returns dimension information for coordinate mapping 444 */ 445export async function maybeResizeAndDownsampleImageBlock( 446 imageBlock: ImageBlockParam, 447): Promise<ImageBlockWithDimensions> { 448 // Only process base64 images 449 if (imageBlock.source.type !== 'base64') { 450 return { block: imageBlock } 451 } 452 453 // Decode base64 to buffer 454 const imageBuffer = Buffer.from(imageBlock.source.data, 'base64') 455 const originalSize = imageBuffer.length 456 457 // Extract extension from media type 458 const mediaType = imageBlock.source.media_type 459 const ext = mediaType?.split('/')[1] || 'png' 460 461 // Resize if needed 462 const resized = await maybeResizeAndDownsampleImageBuffer( 463 imageBuffer, 464 originalSize, 465 ext, 466 ) 467 468 // Return resized image block with dimension info 469 return { 470 block: { 471 type: 'image', 472 source: { 473 type: 'base64', 474 media_type: 475 `image/${resized.mediaType}` as Base64ImageSource['media_type'], 476 data: resized.buffer.toString('base64'), 477 }, 478 }, 479 dimensions: resized.dimensions, 480 } 481} 482 483/** 484 * Compresses an image buffer to fit within a maximum byte size. 485 * 486 * Uses a multi-strategy fallback approach because simple compression often fails for 487 * large screenshots, high-resolution photos, or images with complex gradients. Each 488 * strategy is progressively more aggressive to handle edge cases where earlier 489 * strategies produce files still exceeding the size limit. 490 * 491 * Strategy (from FileReadTool): 492 * 1. Try to preserve original format (PNG, JPEG, WebP) with progressive resizing 493 * 2. 
For PNG: Use palette optimization and color reduction if needed 494 * 3. Last resort: Convert to JPEG with aggressive compression 495 * 496 * This ensures images fit within context windows while maintaining format when possible. 497 */ 498export async function compressImageBuffer( 499 imageBuffer: Buffer, 500 maxBytes: number = IMAGE_TARGET_RAW_SIZE, 501 originalMediaType?: string, 502): Promise<CompressedImageResult> { 503 // Extract format from originalMediaType if provided (e.g., "image/png" -> "png") 504 const fallbackFormat = originalMediaType?.split('/')[1] || 'jpeg' 505 const normalizedFallback = fallbackFormat === 'jpg' ? 'jpeg' : fallbackFormat 506 507 try { 508 const sharp = await getImageProcessor() 509 const metadata = await sharp(imageBuffer).metadata() 510 const format = metadata.format || normalizedFallback 511 const originalSize = imageBuffer.length 512 513 const context: ImageCompressionContext = { 514 imageBuffer, 515 metadata, 516 format, 517 maxBytes, 518 originalSize, 519 } 520 521 // If image is already within size limit, return as-is without processing 522 if (originalSize <= maxBytes) { 523 return createCompressedImageResult(imageBuffer, format, originalSize) 524 } 525 526 // Try progressive resizing with format preservation 527 const resizedResult = await tryProgressiveResizing(context, sharp) 528 if (resizedResult) { 529 return resizedResult 530 } 531 532 // For PNG, try palette optimization 533 if (format === 'png') { 534 const palettizedResult = await tryPalettePNG(context, sharp) 535 if (palettizedResult) { 536 return palettizedResult 537 } 538 } 539 540 // Try JPEG conversion with moderate compression 541 const jpegResult = await tryJPEGConversion(context, 50, sharp) 542 if (jpegResult) { 543 return jpegResult 544 } 545 546 // Last resort: ultra-compressed JPEG 547 return await createUltraCompressedJPEG(context, sharp) 548 } catch (error) { 549 // Log the error and emit analytics event 550 logError(error as Error) 551 const errorType = 
classifyImageError(error) 552 const errorMsg = errorMessage(error) 553 logEvent('tengu_image_compress_failed', { 554 original_size_bytes: imageBuffer.length, 555 max_bytes: maxBytes, 556 error_type: errorType, 557 error_message_hash: hashString(errorMsg), 558 }) 559 560 // If original image is within the requested limit, allow it through 561 if (imageBuffer.length <= maxBytes) { 562 // Detect actual format from magic bytes instead of trusting the provided media type 563 const detected = detectImageFormatFromBuffer(imageBuffer) 564 return { 565 base64: imageBuffer.toString('base64'), 566 mediaType: detected, 567 originalSize: imageBuffer.length, 568 } 569 } 570 571 // Image is too large and compression failed - throw error 572 throw new ImageResizeError( 573 `Unable to compress image (${formatFileSize(imageBuffer.length)}) to fit within ${formatFileSize(maxBytes)}. ` + 574 `Please use a smaller image.`, 575 ) 576 } 577} 578 579/** 580 * Compresses an image buffer to fit within a token limit. 581 * Converts tokens to bytes using the formula: maxBytes = (maxTokens / 0.125) * 0.75 582 */ 583export async function compressImageBufferWithTokenLimit( 584 imageBuffer: Buffer, 585 maxTokens: number, 586 originalMediaType?: string, 587): Promise<CompressedImageResult> { 588 // Convert token limit to byte limit 589 // base64 uses about 4/3 the original size, so we reverse this 590 const maxBase64Chars = Math.floor(maxTokens / 0.125) 591 const maxBytes = Math.floor(maxBase64Chars * 0.75) 592 593 return compressImageBuffer(imageBuffer, maxBytes, originalMediaType) 594} 595 596/** 597 * Compresses an image block to fit within a maximum byte size. 598 * Wrapper around compressImageBuffer for ImageBlockParam. 
599 */ 600export async function compressImageBlock( 601 imageBlock: ImageBlockParam, 602 maxBytes: number = IMAGE_TARGET_RAW_SIZE, 603): Promise<ImageBlockParam> { 604 // Only process base64 images 605 if (imageBlock.source.type !== 'base64') { 606 return imageBlock 607 } 608 609 // Decode base64 to buffer 610 const imageBuffer = Buffer.from(imageBlock.source.data, 'base64') 611 612 // Check if already within size limit 613 if (imageBuffer.length <= maxBytes) { 614 return imageBlock 615 } 616 617 // Compress the image 618 const compressed = await compressImageBuffer(imageBuffer, maxBytes) 619 620 return { 621 type: 'image', 622 source: { 623 type: 'base64', 624 media_type: compressed.mediaType, 625 data: compressed.base64, 626 }, 627 } 628} 629 630// Helper functions for compression pipeline 631 632function createCompressedImageResult( 633 buffer: Buffer, 634 mediaType: string, 635 originalSize: number, 636): CompressedImageResult { 637 const normalizedMediaType = mediaType === 'jpg' ? 'jpeg' : mediaType 638 return { 639 base64: buffer.toString('base64'), 640 mediaType: 641 `image/${normalizedMediaType}` as Base64ImageSource['media_type'], 642 originalSize, 643 } 644} 645 646async function tryProgressiveResizing( 647 context: ImageCompressionContext, 648 sharp: SharpFunction, 649): Promise<CompressedImageResult | null> { 650 const scalingFactors = [1.0, 0.75, 0.5, 0.25] 651 652 for (const scalingFactor of scalingFactors) { 653 const newWidth = Math.round( 654 (context.metadata.width || 2000) * scalingFactor, 655 ) 656 const newHeight = Math.round( 657 (context.metadata.height || 2000) * scalingFactor, 658 ) 659 660 let resizedImage = sharp(context.imageBuffer).resize(newWidth, newHeight, { 661 fit: 'inside', 662 withoutEnlargement: true, 663 }) 664 665 // Apply format-specific optimizations 666 resizedImage = applyFormatOptimizations(resizedImage, context.format) 667 668 const resizedBuffer = await resizedImage.toBuffer() 669 670 if (resizedBuffer.length <= 
context.maxBytes) { 671 return createCompressedImageResult( 672 resizedBuffer, 673 context.format, 674 context.originalSize, 675 ) 676 } 677 } 678 679 return null 680} 681 682function applyFormatOptimizations( 683 image: SharpInstance, 684 format: string, 685): SharpInstance { 686 switch (format) { 687 case 'png': 688 return image.png({ 689 compressionLevel: 9, 690 palette: true, 691 }) 692 case 'jpeg': 693 case 'jpg': 694 return image.jpeg({ quality: 80 }) 695 case 'webp': 696 return image.webp({ quality: 80 }) 697 default: 698 return image 699 } 700} 701 702async function tryPalettePNG( 703 context: ImageCompressionContext, 704 sharp: SharpFunction, 705): Promise<CompressedImageResult | null> { 706 const palettePng = await sharp(context.imageBuffer) 707 .resize(800, 800, { 708 fit: 'inside', 709 withoutEnlargement: true, 710 }) 711 .png({ 712 compressionLevel: 9, 713 palette: true, 714 colors: 64, // Reduce colors to 64 for better compression 715 }) 716 .toBuffer() 717 718 if (palettePng.length <= context.maxBytes) { 719 return createCompressedImageResult(palettePng, 'png', context.originalSize) 720 } 721 722 return null 723} 724 725async function tryJPEGConversion( 726 context: ImageCompressionContext, 727 quality: number, 728 sharp: SharpFunction, 729): Promise<CompressedImageResult | null> { 730 const jpegBuffer = await sharp(context.imageBuffer) 731 .resize(600, 600, { 732 fit: 'inside', 733 withoutEnlargement: true, 734 }) 735 .jpeg({ quality }) 736 .toBuffer() 737 738 if (jpegBuffer.length <= context.maxBytes) { 739 return createCompressedImageResult(jpegBuffer, 'jpeg', context.originalSize) 740 } 741 742 return null 743} 744 745async function createUltraCompressedJPEG( 746 context: ImageCompressionContext, 747 sharp: SharpFunction, 748): Promise<CompressedImageResult> { 749 const ultraCompressedBuffer = await sharp(context.imageBuffer) 750 .resize(400, 400, { 751 fit: 'inside', 752 withoutEnlargement: true, 753 }) 754 .jpeg({ quality: 20 }) 755 .toBuffer() 
756 757 return createCompressedImageResult( 758 ultraCompressedBuffer, 759 'jpeg', 760 context.originalSize, 761 ) 762} 763 764/** 765 * Detect image format from a buffer using magic bytes 766 * @param buffer Buffer containing image data 767 * @returns Media type string (e.g., 'image/png', 'image/jpeg') or 'image/png' as default 768 */ 769export function detectImageFormatFromBuffer(buffer: Buffer): ImageMediaType { 770 if (buffer.length < 4) return 'image/png' // default 771 772 // Check PNG signature 773 if ( 774 buffer[0] === 0x89 && 775 buffer[1] === 0x50 && 776 buffer[2] === 0x4e && 777 buffer[3] === 0x47 778 ) { 779 return 'image/png' 780 } 781 782 // Check JPEG signature (FFD8FF) 783 if (buffer[0] === 0xff && buffer[1] === 0xd8 && buffer[2] === 0xff) { 784 return 'image/jpeg' 785 } 786 787 // Check GIF signature (GIF87a or GIF89a) 788 if (buffer[0] === 0x47 && buffer[1] === 0x49 && buffer[2] === 0x46) { 789 return 'image/gif' 790 } 791 792 // Check WebP signature (RIFF....WEBP) 793 if ( 794 buffer[0] === 0x52 && 795 buffer[1] === 0x49 && 796 buffer[2] === 0x46 && 797 buffer[3] === 0x46 798 ) { 799 if ( 800 buffer.length >= 12 && 801 buffer[8] === 0x57 && 802 buffer[9] === 0x45 && 803 buffer[10] === 0x42 && 804 buffer[11] === 0x50 805 ) { 806 return 'image/webp' 807 } 808 } 809 810 // Default to PNG if unknown 811 return 'image/png' 812} 813 814/** 815 * Detect image format from base64 data using magic bytes 816 * @param base64Data Base64 encoded image data 817 * @returns Media type string (e.g., 'image/png', 'image/jpeg') or 'image/png' as default 818 */ 819export function detectImageFormatFromBase64( 820 base64Data: string, 821): ImageMediaType { 822 try { 823 const buffer = Buffer.from(base64Data, 'base64') 824 return detectImageFormatFromBuffer(buffer) 825 } catch { 826 // Default to PNG on any error 827 return 'image/png' 828 } 829} 830 831/** 832 * Creates a text description of image metadata including dimensions and source path. 
833 * Returns null if no useful metadata is available. 834 */ 835export function createImageMetadataText( 836 dims: ImageDimensions, 837 sourcePath?: string, 838): string | null { 839 const { originalWidth, originalHeight, displayWidth, displayHeight } = dims 840 // Skip if dimensions are not available or invalid 841 // Note: checks for undefined/null and zero to prevent division by zero 842 if ( 843 !originalWidth || 844 !originalHeight || 845 !displayWidth || 846 !displayHeight || 847 displayWidth <= 0 || 848 displayHeight <= 0 849 ) { 850 // If we have a source path but no valid dimensions, still return source info 851 if (sourcePath) { 852 return `[Image source: ${sourcePath}]` 853 } 854 return null 855 } 856 // Check if image was resized 857 const wasResized = 858 originalWidth !== displayWidth || originalHeight !== displayHeight 859 860 // Only include metadata if there's useful info (resized or has source path) 861 if (!wasResized && !sourcePath) { 862 return null 863 } 864 865 // Build metadata parts 866 const parts: string[] = [] 867 868 if (sourcePath) { 869 parts.push(`source: ${sourcePath}`) 870 } 871 872 if (wasResized) { 873 const scaleFactor = originalWidth / displayWidth 874 parts.push( 875 `original ${originalWidth}x${originalHeight}, displayed at ${displayWidth}x${displayHeight}. Multiply coordinates by ${scaleFactor.toFixed(2)} to map to original image.`, 876 ) 877 } 878 879 return `[Image: ${parts.join(', ')}]` 880}