mirror of https://git.lenooby09.tech/LeNooby09/social-app.git
import React, {useState} from 'react'
import {ActivityIndicator, StyleSheet} from 'react-native'
import {
  Gesture,
  GestureDetector,
  PanGesture,
} from 'react-native-gesture-handler'
import Animated, {
  runOnJS,
  SharedValue,
  useAnimatedReaction,
  useAnimatedRef,
  useAnimatedStyle,
  useSharedValue,
  withSpring,
} from 'react-native-reanimated'
import {Image} from 'expo-image'

import type {
  Dimensions as ImageDimensions,
  ImageSource,
  Transform,
} from '../../@types'
import {
  applyRounding,
  createTransform,
  prependPan,
  prependPinch,
  prependTransform,
  readTransform,
  TransformMatrix,
} from '../../transforms'

const MIN_SCREEN_ZOOM = 2
const MAX_ORIGINAL_IMAGE_ZOOM = 2

const initialTransform = createTransform()

type Props = {
  imageSrc: ImageSource
  onRequestClose: () => void
  onTap: () => void
  onZoom: (isZoomed: boolean) => void
  onLoad: (dims: ImageDimensions) => void
  isScrollViewBeingDragged: boolean
  showControls: boolean
  measureSafeArea: () => {
    x: number
    y: number
    width: number
    height: number
  }
  imageAspect: number | undefined
  imageDimensions: ImageDimensions | undefined
  dismissSwipePan: PanGesture
  transforms: Readonly<
    SharedValue<{
      scaleAndMoveTransform: Transform
      cropFrameTransform: Transform
      cropContentTransform: Transform
      isResting: boolean
      isHidden: boolean
    }>
  >
}
const ImageItem = ({
  imageSrc,
  onTap,
  onZoom,
  onLoad,
  isScrollViewBeingDragged,
  measureSafeArea,
  imageAspect,
  imageDimensions,
  dismissSwipePan,
  transforms,
}: Props) => {
  const [isScaled, setIsScaled] = useState(false)
  const committedTransform = useSharedValue(initialTransform)
  const panTranslation = useSharedValue({x: 0, y: 0})
  const pinchOrigin = useSharedValue({x: 0, y: 0})
  const pinchScale = useSharedValue(1)
  const pinchTranslation = useSharedValue({x: 0, y: 0})
  const containerRef = useAnimatedRef()

  // Keep track of when we're entering or leaving scaled rendering.
  // Note: DO NOT move any logic reading animated values outside this function.
  useAnimatedReaction(
    () => {
      if (pinchScale.get() !== 1) {
        // We're currently pinching.
        return true
      }
      const [, , committedScale] = readTransform(committedTransform.get())
      if (committedScale !== 1) {
        // We started from a pinched in state.
        return true
      }
      // We're at rest.
      return false
    },
    (nextIsScaled, prevIsScaled) => {
      if (nextIsScaled !== prevIsScaled) {
        runOnJS(handleZoom)(nextIsScaled)
      }
    },
  )

  function handleZoom(nextIsScaled: boolean) {
    setIsScaled(nextIsScaled)
    onZoom(nextIsScaled)
  }

  // On Android, stock apps prevent going "out of bounds" on pan or pinch. You should "bump" into edges.
  // If the user tried to pan too hard, this function will provide the negative panning to stay in bounds.
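  // A worked example with hypothetical numbers: a landscape image (aspect ratio 2) on a
  // 1000x2000 screen measures 3000x1500 when scaled to 3x, so the pannable range is
  // ±(3000 - 1000) / 2 = ±1000 horizontally and 0 vertically (the scaled image is shorter
  // than the screen). A candidate translateX of 1200 would then be corrected by dx = -200.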
  function getExtraTranslationToStayInBounds(
    candidateTransform: TransformMatrix,
    screenSize: {width: number; height: number},
  ) {
    'worklet'
    if (!imageAspect) {
      return [0, 0]
    }
    const [nextTranslateX, nextTranslateY, nextScale] =
      readTransform(candidateTransform)
    const scaledDimensions = getScaledDimensions(
      imageAspect,
      nextScale,
      screenSize,
    )
    const clampedTranslateX = clampTranslation(
      nextTranslateX,
      scaledDimensions.width,
      screenSize.width,
    )
    const clampedTranslateY = clampTranslation(
      nextTranslateY,
      scaledDimensions.height,
      screenSize.height,
    )
    const dx = clampedTranslateX - nextTranslateX
    const dy = clampedTranslateY - nextTranslateY
    return [dx, dy]
  }

  const pinch = Gesture.Pinch()
    .onStart(e => {
      'worklet'
      const screenSize = measureSafeArea()
      pinchOrigin.set({
        x: e.focalX - screenSize.width / 2,
        y: e.focalY - screenSize.height / 2,
      })
    })
    .onChange(e => {
      'worklet'
      const screenSize = measureSafeArea()
      if (!imageDimensions) {
        return
      }
      // Don't let the picture zoom in so close that it gets blurry.
      // Also, like in stock Android apps, don't let the user zoom out further than 1:1.
      const [, , committedScale] = readTransform(committedTransform.get())
      const maxCommittedScale = Math.max(
        MIN_SCREEN_ZOOM,
        (imageDimensions.width / screenSize.width) * MAX_ORIGINAL_IMAGE_ZOOM,
      )
      const minPinchScale = 1 / committedScale
      const maxPinchScale = maxCommittedScale / committedScale
      const nextPinchScale = Math.min(
        Math.max(minPinchScale, e.scale),
        maxPinchScale,
      )
      pinchScale.set(nextPinchScale)

      // Zooming out close to the corner could push us out of bounds, which we don't want on Android.
      // Calculate where we'll end up so we know how much to translate back to stay in bounds.
      const t = createTransform()
      prependPan(t, panTranslation.get())
      prependPinch(t, nextPinchScale, pinchOrigin.get(), pinchTranslation.get())
      prependTransform(t, committedTransform.get())
      const [dx, dy] = getExtraTranslationToStayInBounds(t, screenSize)
      if (dx !== 0 || dy !== 0) {
        const pt = pinchTranslation.get()
        pinchTranslation.set({
          x: pt.x + dx,
          y: pt.y + dy,
        })
      }
    })
    .onEnd(() => {
      'worklet'
      // Commit just the pinch.
      let t = createTransform()
      prependPinch(
        t,
        pinchScale.get(),
        pinchOrigin.get(),
        pinchTranslation.get(),
      )
      prependTransform(t, committedTransform.get())
      applyRounding(t)
      committedTransform.set(t)

      // Reset just the pinch.
      pinchScale.set(1)
      pinchOrigin.set({x: 0, y: 0})
      pinchTranslation.set({x: 0, y: 0})
    })

  const pan = Gesture.Pan()
    .averageTouches(true)
    // Unlike .enabled(isScaled), this ensures that an initial pinch can turn into a pan midway:
    .minPointers(isScaled ? 1 : 2)
    .onChange(e => {
      'worklet'
      const screenSize = measureSafeArea()
      if (!imageDimensions) {
        return
      }

      const nextPanTranslation = {x: e.translationX, y: e.translationY}
      let t = createTransform()
      prependPan(t, nextPanTranslation)
      prependPinch(
        t,
        pinchScale.get(),
        pinchOrigin.get(),
        pinchTranslation.get(),
      )
      prependTransform(t, committedTransform.get())

      // Prevent panning from going out of bounds.
      const [dx, dy] = getExtraTranslationToStayInBounds(t, screenSize)
      nextPanTranslation.x += dx
      nextPanTranslation.y += dy
      panTranslation.set(nextPanTranslation)
    })
    .onEnd(() => {
      'worklet'
      // Commit just the pan.
      let t = createTransform()
      prependPan(t, panTranslation.get())
      prependTransform(t, committedTransform.get())
      applyRounding(t)
      committedTransform.set(t)

      // Reset just the pan.
      panTranslation.set({x: 0, y: 0})
    })

  const singleTap = Gesture.Tap().onEnd(() => {
    'worklet'
    runOnJS(onTap)()
  })

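  // The double tap below zooms to whichever scale removes the letterboxing (the larger of
  // imageAspect / screenAspect and its inverse), bounded below by MIN_SCREEN_ZOOM and above by
  // the blur limit. With hypothetical numbers: an image of aspect 2 on a screen of aspect 0.5
  // gives a candidate scale of max(2 / 0.5, 0.5 / 2, 2) = 4; if the original is 3000px wide and
  // the screen is 1000pt wide, the blur limit is (3000 / 1000) * 2 = 6, so the final scale is
  // min(4, 6) = 4. A second double tap (committed scale no longer 1) springs back to identity.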
  const doubleTap = Gesture.Tap()
    .numberOfTaps(2)
    .onEnd(e => {
      'worklet'
      const screenSize = measureSafeArea()
      if (!imageDimensions || !imageAspect) {
        return
      }
      const [, , committedScale] = readTransform(committedTransform.get())
      if (committedScale !== 1) {
        // Go back to 1:1 using the identity vector.
        let t = createTransform()
        committedTransform.set(withClampedSpring(t))
        return
      }

      // Try to zoom in so that we get rid of the black bars (whatever the orientation was).
      const screenAspect = screenSize.width / screenSize.height
      const candidateScale = Math.max(
        imageAspect / screenAspect,
        screenAspect / imageAspect,
        MIN_SCREEN_ZOOM,
      )
      // But don't zoom in so close that the picture gets blurry.
      const maxScale = Math.max(
        MIN_SCREEN_ZOOM,
        (imageDimensions.width / screenSize.width) * MAX_ORIGINAL_IMAGE_ZOOM,
      )
      const scale = Math.min(candidateScale, maxScale)

      // Calculate where we would be if the user pinched into the double tapped point.
      // We won't use this transform directly because it may go out of bounds.
      const candidateTransform = createTransform()
      const origin = {
        x: e.absoluteX - screenSize.width / 2,
        y: e.absoluteY - screenSize.height / 2,
      }
      prependPinch(candidateTransform, scale, origin, {x: 0, y: 0})

      // Now we know how much we went out of bounds, so we can shoot correctly.
      const [dx, dy] = getExtraTranslationToStayInBounds(
        candidateTransform,
        screenSize,
      )
      const finalTransform = createTransform()
      prependPinch(finalTransform, scale, origin, {x: dx, y: dy})
      committedTransform.set(withClampedSpring(finalTransform))
    })

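  // Gesture priority, highest first: the dismiss swipe, then pinch and pan (which may run
  // simultaneously), then double tap, then single tap. While the parent scroll view is being
  // dragged, a Manual gesture is substituted so none of these can activate.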
  const composedGesture = isScrollViewBeingDragged
    ? // If the parent is not at rest, provide a no-op gesture.
      Gesture.Manual()
    : Gesture.Exclusive(
        dismissSwipePan,
        Gesture.Simultaneous(pinch, pan),
        doubleTap,
        singleTap,
      )

  const containerStyle = useAnimatedStyle(() => {
    const {scaleAndMoveTransform, isHidden} = transforms.get()
    // Apply the active adjustments on top of the committed transform before the gestures.
    // This is matrix multiplication, so operations are applied in the reverse order.
    let t = createTransform()
    prependPan(t, panTranslation.get())
    prependPinch(t, pinchScale.get(), pinchOrigin.get(), pinchTranslation.get())
    prependTransform(t, committedTransform.get())
    const [translateX, translateY, scale] = readTransform(t)
    const manipulationTransform = [
      {translateX},
      {translateY: translateY},
      {scale},
    ]
    const screenSize = measureSafeArea()
    return {
      opacity: isHidden ? 0 : 1,
      transform: scaleAndMoveTransform.concat(manipulationTransform),
      width: screenSize.width,
      maxHeight: screenSize.height,
      alignSelf: 'center',
      aspectRatio: imageAspect ?? 1 /* force onLoad */,
    }
  })

  const imageCropStyle = useAnimatedStyle(() => {
    const {cropFrameTransform} = transforms.get()
    return {
      flex: 1,
      overflow: 'hidden',
      transform: cropFrameTransform,
    }
  })

  const imageStyle = useAnimatedStyle(() => {
    const {cropContentTransform} = transforms.get()
    return {
      flex: 1,
      transform: cropContentTransform,
      opacity: imageAspect === undefined ? 0 : 1,
    }
  })

  const [showLoader, setShowLoader] = useState(false)
  const [hasLoaded, setHasLoaded] = useState(false)
  useAnimatedReaction(
    () => {
      return transforms.get().isResting && !hasLoaded
    },
    (show, prevShow) => {
      if (!prevShow && show) {
        runOnJS(setShowLoader)(true)
      } else if (prevShow && !show) {
        runOnJS(setShowLoader)(false)
      }
    },
  )

  const type = imageSrc.type
  const borderRadius =
    type === 'circle-avi' ? 1e5 : type === 'rect-avi' ? 20 : 0

  return (
    <GestureDetector gesture={composedGesture}>
      <Animated.View
        ref={containerRef}
        style={[styles.container]}
        renderToHardwareTextureAndroid>
        <Animated.View style={containerStyle}>
          {showLoader && (
            <ActivityIndicator
              size="small"
              color="#FFF"
              style={styles.loading}
            />
          )}
          <Animated.View style={imageCropStyle}>
            <Animated.View style={imageStyle}>
              <Image
                contentFit="contain"
                source={{uri: imageSrc.uri}}
                placeholderContentFit="contain"
                placeholder={{uri: imageSrc.thumbUri}}
                accessibilityLabel={imageSrc.alt}
                onLoad={
                  hasLoaded
                    ? undefined
                    : e => {
                        setHasLoaded(true)
                        onLoad({width: e.source.width, height: e.source.height})
                      }
                }
                style={{flex: 1, borderRadius}}
                accessibilityHint=""
                accessibilityIgnoresInvertColors
                cachePolicy="memory"
              />
            </Animated.View>
          </Animated.View>
        </Animated.View>
      </Animated.View>
    </GestureDetector>
  )
}

const styles = StyleSheet.create({
  container: {
    height: '100%',
    overflow: 'hidden',
    justifyContent: 'center',
  },
  loading: {
    position: 'absolute',
    left: 0,
    right: 0,
    top: 0,
    bottom: 0,
    justifyContent: 'center',
  },
})

function getScaledDimensions(
  imageAspect: number,
  scale: number,
  screenSize: {width: number; height: number},
): ImageDimensions {
  'worklet'
  const screenAspect = screenSize.width / screenSize.height
  const isLandscape = imageAspect > screenAspect
  if (isLandscape) {
    return {
      width: scale * screenSize.width,
      height: (scale * screenSize.width) / imageAspect,
    }
  } else {
    return {
      width: scale * screenSize.height * imageAspect,
      height: scale * screenSize.height,
    }
  }
}

function clampTranslation(
  value: number,
  scaledSize: number,
  screenSize: number,
): number {
  'worklet'
  // Figure out how much the user should be allowed to pan, and constrain the translation.
  const panDistance = Math.max(0, (scaledSize - screenSize) / 2)
  const clampedValue = Math.min(Math.max(-panDistance, value), panDistance)
  return clampedValue
}

function withClampedSpring(value: any) {
  'worklet'
  return withSpring(value, {overshootClamping: true})
}

export default React.memo(ImageItem)