I am trying to get the x, y coordinates of the circle relative to its parent, using the PanResponder in real time as I drag it around the screen.
According to the docs (https://facebook.github.io/react-native/docs/panresponder), locationY should be the Y position of the touch, relative to the element; however, it doesn't seem to be correct. Is there something I am missing?
import React, { Component } from 'react';
import PropTypes from 'prop-types';
import Obstacle from './Obstacle';
import {
StyleSheet,
Text,
View,
Dimensions,
TouchableOpacity,
PanResponder,
Animated
} from 'react-native';
import { Svg } from 'expo';
const { Circle, Rect } = Svg;
const NUM_OBSTACLES = 1;
const CIRCLE_RADIUS = 40;
const windowWidth = Dimensions.get('window').width;
const windowHeight = Dimensions.get('window').height;
class Canvas extends Component {
constructor(props) {
super(props);
this.state = {
pan: new Animated.ValueXY(),
mousePosition: { x: 0, y: 0 }
};
}
componentWillMount() {
// Add a listener for the delta value change
this._val = { x: 0, y: 0 };
this.state.pan.addListener(value => (this._val = value));
// Initialize PanResponder with move handling
this.panResponder = PanResponder.create({
onStartShouldSetPanResponder: (evt, gestureState) => true,
onStartShouldSetPanResponderCapture: (evt, gestureState) => true,
onMoveShouldSetPanResponder: (evt, gestureState) => true,
onMoveShouldSetPanResponderCapture: (evt, gestureState) => true,
onPanResponderGrant: (evt, gestureState) => {
this.state.pan.setOffset(this.state.pan.__getValue());
this.state.pan.setValue({ x: 0, y: 0 });
this.locationPageOffsetX =
evt.nativeEvent.pageX - evt.nativeEvent.locationX;
this.locationPageOffsetY =
evt.nativeEvent.pageY - evt.nativeEvent.locationY;
},
onPanResponderMove: Animated.event(
[null, { dx: this.state.pan.x, dy: this.state.pan.y }],
{
listener: (evt, gestureState) => {
console.log(
`locationX : ${evt.nativeEvent.locationX} locationY : ${
evt.nativeEvent.locationY
}`
);
}
}
)
});
}
render() {
const panStyle = {
transform: this.state.pan.getTranslateTransform()
};
// );
return (
<>
<View
style={{
borderWidth: 1,
height: '80%',
width: '100%',
backgroundColor: 'lightgrey'
}}
>
<Animated.View
{...this.panResponder.panHandlers}
style={[panStyle, styles.circle]}
/>
</View>
</>
);
}
}
// Style for the draggable circle: absolutely positioned, sized from
// CIRCLE_RADIUS, and kept above the canvas background by zIndex. Its
// on-screen position comes from the Animated translate transform,
// not from top/left.
const styles = StyleSheet.create({
circle: {
position: 'absolute',
backgroundColor: 'skyblue',
width: CIRCLE_RADIUS * 2,
height: CIRCLE_RADIUS * 2,
borderRadius: CIRCLE_RADIUS,
zIndex: 1
}
});
export default Canvas;
I seem to get similar values no matter where I drag the ball, I would expect these values to be closer to zero as the ball is in the top left corner:
locationX : 48 locationY : 48
locationX : 48 locationY : 47.5
locationX : 47 locationY : 48
locationX : 44 locationY : 46.5
Related
I'm new to React, so please be nice.
I'm trying to animate my compass so that every time the userLocation is updated, the arrow (in my code, the PNG of the animated image) is rotated by the given angle (here rotation) so that it points at another location. For some reason, it seems like the rotation passed to the Animated.Image remains 0, because the image never rotates. Can someone lend me a hand real quick?
Here's my code:
import {
Alert,
Animated,
Easing,
Linking,
StyleSheet,
Text,
View,
} from "react-native";
import React, { useEffect, useRef, useState } from "react";
import * as Location from "expo-location";
import * as geolib from "geolib";
import { COLORS } from "../../assets/Colors/Colors";
export default function DateFinder() {
const [hasForegroundPermissions, setHasForegroundPermissions] =
useState(null);
const [userLocation, setUserLocation] = useState(null);
const [userHeading, setUserHeading] = useState(null);
const [angle, setAngle] = useState(0);
const rotation = useRef(new Animated.Value(0)).current;
useEffect(() => {
const AccessLocation = async () => {
function appSettings() {
console.warn("Open settigs pressed");
if (Platform.OS === "ios") {
Linking.openURL("app-settings:");
} else RNAndroidOpenSettings.appDetailsSettings();
}
const appSettingsALert = () => {
Alert.alert(
"Allow Wassupp to Use your Location",
"Open your app settings to allow Wassupp to access your current position. Without it, you won't be able to use the love compass",
[
{
text: "Cancel",
onPress: () => console.warn("Cancel pressed"),
},
{ text: "Open settings", onPress: appSettings },
]
);
};
const foregroundPermissions =
await Location.requestForegroundPermissionsAsync();
if (
foregroundPermissions.canAskAgain == false ||
foregroundPermissions.status == "denied"
) {
appSettingsALert();
}
setHasForegroundPermissions(foregroundPermissions.status === "granted");
if (foregroundPermissions.status == "granted") {
const location = await Location.watchPositionAsync(
{
accuracy: Location.Accuracy.BestForNavigation,
activityType: Location.ActivityType.Fitness,
distanceInterval: 0,
},
(location) => {
setUserLocation(location);
}
);
const heading = await Location.watchHeadingAsync((heading) => {
setUserHeading(heading.trueHeading);
});
}
};
AccessLocation().catch(console.error);
}, []);
useEffect(() => {
if (userLocation != null) {
setAngle(getBearing() - userHeading);
rotateImage(angle); // Here's the call to the rotateImage function that should cause the value of rotation to be animated
}
}, [userLocation]);
const textPosition = JSON.stringify(userLocation);
const getBearing = () => {
const bearing = geolib.getGreatCircleBearing(
{
latitude: userLocation.coords.latitude,
longitude: userLocation.coords.longitude,
},
{
latitude: 45.47307231766645,
longitude: -73.86611198944459,
}
);
return bearing;
};
const rotateImage = (angle) => {
Animated.timing(rotation, {
toValue: angle,
duration: 1000,
easing: Easing.bounce,
useNativeDriver: true,
}).start();
};
return (
<View style={styles.background}>
<Text>{textPosition}</Text>
<Animated.Image
source={require("../../assets/Compass/Arrow_up.png")}
style={[styles.image, { transform: [{ rotate: `${rotation}deg` }] }]} // this is where it should get rotated but it doesn't for some reason
/>
</View>
);
}
// Static styles for DateFinder. scrollView is defined but not referenced
// in the visible JSX — presumably used by UI outside this snippet.
const styles = StyleSheet.create({
background: {
backgroundColor: COLORS.background_Pale,
flex: 1,
// justifyContent: "flex-start",
//alignItems: "center",
},
image: {
flex: 1,
// height: null,
// width: null,
//alignItems: "center",
},
scrollView: {
backgroundColor: COLORS.background_Pale,
},
});
Your error is here
useEffect(() => {
if (userLocation != null) {
// BUG: `angle` below still holds the value from the previous render —
// setAngle only takes effect on the next render, so rotateImage always
// receives a stale angle.
setAngle(getBearing() - userHeading);
rotateImage(angle); // Here's the call to the rotateImage function that should cause the value of rotation to be animated
}
}, [userLocation]);
The angle will be updated on the next render, so the rotation you do will always be a render behind. You could either store the result of getBearing and setAngle to that value as well as provide that value to rotateImage:
useEffect(() => {
  if (userLocation != null) {
    // Use the freshly computed bearing directly instead of reading `angle`,
    // which would still hold the previous render's value.
    const nextAngle = getBearing() - userHeading;
    setAngle(nextAngle);
    rotateImage(nextAngle);
  }
}, [userLocation]);
or you could use useEffect and listen for angle changes:
// Re-run the animation whenever the `angle` state actually updates.
useEffect(() => {
rotateImage(angle)
}, [angle]);
I am trying to make a draggable button using react.
The button drags over the page properly, but when I drop it, its top and left values become negative (not even reset to their original top: 0, left: 0), i.e. the component goes out of the page.
code sand box link : code
main draggable.js component:
import React, { Component } from 'react';
import { Button } from '#material-ui/core';
class DraggableButton extends Component {
constructor() {
super();
this.state = {
dragging: false,
diffX: 0,
diffY: 0,
style: {
top: 0,
left: 0
}
}
}
handleMouseDown = (event) => {
console.log("element caught");
this.setState({
diffX: event.clientX - event.currentTarget.getBoundingClientRect().left,
diffY: event.clientY - event.currentTarget.getBoundingClientRect().top,
dragging: true
})
}
handleMouseMove = (event) => {
if (this.state.dragging) {
console.log("dragging");
let left = event.clientX - this.state.diffX;
let top = event.clientY - this.state.diffY;
this.setState({
style: {
left,
top
}
}, console.log("style ", this.state.style))
}
}
handleMouseUp = () => {
console.log('element released');
console.log('left value ', this.state.style.left);
console.log('top value ', this.state.style.top);
this.setState({
dragging: false,
})
}
render() {
return (
<Button
variant="contained" color="primary"
style={{ position: "absolute", ...this.state.style }}
draggable={true}
onDragStart={this.handleMouseDown}
onDrag={this.handleMouseMove}
onDragEnd={this.handleMouseUp}
// onMouseDown={this.handleMouseDown}
// onMouseMove={this.handleMouseMove}
// onMouseUp={this.handleMouseUp}
>
draggable button
</Button>
);
}
}
export default DraggableButton;
console screenshot :
As is visible in the image above at the time of dragging top: 193 left : 309 and as we dropped the element it turned to left: -109 top: -13.
why is this happening how can we fix it ?
In your handleMouseMove you need to check whether event.clientX is a positive integer before changing the state; otherwise it will be subtracted from the diffX value and the result will be negative. (On drag release, clientX becomes 0.)
let left = event.clientX - this.state.diffX;
// Skip the final drag event: on release the browser fires one more drag
// event with clientX of 0, which would compute a negative left/top and
// push the element off-screen.
handleMouseMove = (event) => {
if (this.state.dragging) {
let left = event.clientX - this.state.diffX;
let top = event.clientY - this.state.diffY;
if (event.clientX !== 0)
this.setState({
style: {
left,
top
}
});
}
};
Hello all,
I have looked at a lot of other posts related to this issue, but my question isn't simply how to set the correct preview dimensions based on the aspect ratio chosen. I'm trying to figure out how Snapchat is able to consistently have a full-screen camera preview expanding all the way from the status bar to the bottom of the screen. I've used const ratios = await cameraRef.current.getSupportedRatiosAsync() to get the aspect ratios available for my test device, and the highest one is 16:9, which doesn't give me enough height to have a preview that covers the entire screen without looking distorted and out of the ordinary. In the Android OS camera app there's an aspect ratio called 'full' which takes all the real estate available on the screen to show the camera preview, but the aspect ratios getSupportedRatiosAsync() returns do not include this 'full' aspect ratio. I'm trying to achieve this flush look where the camera preview takes the entire screen height available and does not distort, but I end up having the black bar at the bottom because I have to take into account the aspect ratio of the camera and set the height of the preview accordingly. Is there something I'm missing here? When I simply put flex: 1 for the camera preview it takes over the screen, but it ends up being a distorted preview because it's not fitting the aspect ratio of the camera.
import React, { useState, useRef, useEffect } from 'react'
import {
View,
Text,
StyleSheet,
Dimensions,
Modal,
ImageBackground,
Animated,
BackHandler,
} from 'react-native'
import { TouchableOpacity } from 'react-native-gesture-handler'
//expo camera
import { Camera } from 'expo-camera'
//expo AV
import { Video, AVPlaybackStatus } from 'expo-av'
//custom components
import HeaderX from '../components/HeaderX'
import CameraButton from '../components/CameraButton'
//ionicons
import { Entypo, Ionicons } from '#expo/vector-icons'
import { Icon } from 'react-native-elements'
//ionicons
//colors
import colors from '../constants/colors'
//safe area
import { useSafeAreaInsets } from 'react-native-safe-area-context'
//redux
import { takePicture } from '../store/camera/actions'
import { useDispatch, useSelector } from 'react-redux'
// MediaLibrary
import * as MediaLibrary from 'expo-media-library'
//gesture handlers
import {
PinchGestureHandler,
PinchGestureHandlerGestureEvent,
State,
TapGestureHandler,
TapGestureHandlerStateChangeEvent,
} from 'react-native-gesture-handler'
import Reanimated, {
Extrapolate,
interpolate,
useAnimatedGestureHandler,
useAnimatedProps,
useSharedValue,
useAnimatedStyle,
runOnJS,
runOnUI,
} from 'react-native-reanimated'
//nav 5
import { useFocusEffect } from '#react-navigation/native'
//status bar
import { StatusBar } from 'expo-status-bar'
const { height, width } = Dimensions.get('window')
console.log('🚀 ~ file: CameraScreen.js ~ line 68 ~ height', height)
console.log('🚀 ~ file: CameraScreen.js ~ line 68 ~ width', width)
const CameraScreen = ({ navigation, route }) => {
let checkMarkSet = null
if (route.params) {
checkMarkSet = true
}
// RATIO SETTER
const [imagePadding, setImagePadding] = useState(0)
const [ratio, setRatio] = useState('4:3') // default is 4:3
const screenRatio = height / width
const [isRatioSet, setIsRatioSet] = useState(false)
async function prepareRatio() {
let desiredRatio = '4:3' // Start with the system default
// This issue only affects Android
if (Platform.OS === 'android') {
const ratios = await cameraRef.current.getSupportedRatiosAsync()
let distances = {}
let realRatios = {}
let minDistance = null
for (const ratio of ratios) {
const parts = ratio.split(':')
const ratioHeight = parseInt(parts[0])
const ratioWidth = parseInt(parts[1])
const realRatio = ratioHeight / ratioWidth
realRatios[ratio] = realRatio
// ratio can't be taller than screen, so we don't want an abs()
const distance = screenRatio - realRatio
distances[ratio] = realRatio
if (minDistance == null) {
minDistance = ratio
} else {
if (distance >= 0 && distance < distances[minDistance]) {
minDistance = ratio
}
}
}
// set the best match
desiredRatio = minDistance
// calculate the difference between the camera width and the screen height
const remainder = Math.floor(
(height - realRatios[desiredRatio] * width) / 2
)
// set the preview padding and preview ratio
setImagePadding(remainder / 2)
console.log(`okay look ${remainder / 2}`)
setRatio(desiredRatio)
// Set a flag so we don't do this
// calculation each time the screen refreshes
setIsRatioSet(true)
}
}
const setCameraReady = async () => {
if (!isRatioSet) {
await prepareRatio()
}
}
// RATIO SETTER
const [type, setType] = useState(Camera.Constants.Type.back)
const [activateCamera, setActivateCamera] = useState(false)
const [video, setVideo] = useState('')
const [showVideoModal, setShowVideoModal] = useState(false)
const insets = useSafeAreaInsets()
useFocusEffect(() => {
if (navigation.isFocused()) {
setActivateCamera(true)
}
})
const [pic, setPic] = useState(null)
const [showModal, setShowModal] = useState(false)
const cameraRef = useRef()
const dispatch = useDispatch()
const [zooming, setZooming] = useState(0)
//camera settings
const [flashMode, setFlashMode] = useState('off')
// const picTaken = useSelector((state) => state.cameraReducer.pictureUri)
// console.log(
// '🚀 ~ file: CameraScreen.js ~ line 36 ~ CameraScreen ~ picTaken',
// picTaken
// )
// camera Functions
async function takePictureHandler() {
try {
if (cameraRef.current) {
const options = {
quality: 0.5,
base64: true,
skipProcessing: true,
}
let photo = await cameraRef.current.takePictureAsync(options)
setPic(photo.uri)
dispatch(takePicture(photo.uri))
setShowModal(true)
}
} catch (err) {
console.log(err)
}
// setPickedImage(image.uri)
// props.onImageTaken(image.uri)
}
function flipCameraHandler() {
setType(
type === Camera.Constants.Type.back
? Camera.Constants.Type.front
: Camera.Constants.Type.back
)
}
function flashSwitchHandler() {
if (flashMode === 'off') {
setFlashMode('on')
}
if (flashMode === 'on') {
setFlashMode('off')
}
}
async function savePictureLocallyHandler(localUri) {
const { status } = await MediaLibrary.getPermissionsAsync()
if (status === 'undetermined') {
const { status } = await MediaLibrary.requestPermissionsAsync()
if (status === 'granted') {
const asset = await MediaLibrary.createAssetAsync(localUri)
}
}
if (status === 'granted') {
const asset = await MediaLibrary.createAssetAsync(localUri)
if (asset) {
//display check mark showing it was saved.
}
}
if (status === 'denied') {
console.log('Open settings and give permission')
}
}
// zoom gesture handler
const zoom = useSharedValue(0)
const MAX_ZOOM_FACTOR = 20
const SCALE_FULL_ZOOM = 20
const formatMaxZoom = 1
const maxZoomFactor = Math.min(formatMaxZoom, MAX_ZOOM_FACTOR)
const neutralZoomScaled = (neutralZoom / maxZoomFactor) * formatMaxZoom
const maxZoomScaled = (1 / formatMaxZoom) * maxZoomFactor
const neutralZoom = 0
useAnimatedProps(
() => ({
zoom: interpolate(
zoom.value,
[0, neutralZoomScaled, 1],
[0, neutralZoom, maxZoomScaled],
Extrapolate.CLAMP
),
}),
[maxZoomScaled, neutralZoom, neutralZoomScaled, zoom]
)
function updateValue() {
setZooming(zoom.value)
}
function willThisWork() {
'worklet'
runOnJS(updateValue)()
}
const onPinchGesture = useAnimatedGestureHandler({
onStart: (_, context) => {
context.startZoom = zoom.value
},
onActive: (event, context) => {
// trying to map the scale gesture to a linear zoom here
const startZoom = context.startZoom ?? 0
const scale = interpolate(
event.scale,
[1 - 1 / SCALE_FULL_ZOOM, 1, SCALE_FULL_ZOOM],
[-1, 0, 1],
Extrapolate.CLAMP
)
zoom.value = interpolate(
scale,
[-1, 0, 1],
[0, startZoom, 1],
Extrapolate.CLAMP
)
willThisWork()
},
})
// VIDEO RECORDING
async function beginRecording() {
console.log('started')
let video = await cameraRef.current.recordAsync()
setVideo(video)
// setPic(photo.uri)
// dispatch(takePicture(photo.uri))
}
async function endRecording() {
console.log('ended')
cameraRef.current.stopRecording()
setShowVideoModal(true)
}
return (
<View
style={{
...styles.container,
// paddingTop: Platform.OS === 'android' ? insets.top : null,
}}
>
<StatusBar
style=""
translucent
backgroundColor="rgba(255,255,255,0)"
/>
<PinchGestureHandler onGestureEvent={onPinchGesture}>
<Reanimated.View
style={{
flex: 1,
backgroundColor: 'back',
justifyContent: 'flex-start',
paddingBottom: imagePadding * 4,
}}
>
{activateCamera && (
<Camera
style={{
// marginTop: imagePadding,
// marginBottom: imagePadding,
flex: 1,
// height: 733,
}}
ref={cameraRef}
type={type}
flashMode={flashMode}
zoom={zooming}
onCameraReady={setCameraReady}
ratio={ratio}
maxDuration={10000}
autoFocus="on"
>
<View
style={[
styles.contentContainer,
{
paddingTop: insets.top,
paddingBottom: insets.bottom,
top: insets.top,
bottom: insets.bottom,
},
]}
>
<View style={styles.topLeftCont}>
<TouchableOpacity
onPress={flipCameraHandler}
>
<Entypo
name="loop"
size={27}
color="white"
style={styles.flipIcon}
/>
</TouchableOpacity>
<TouchableOpacity
onPress={flashSwitchHandler}
>
<Ionicons
name={
flashMode !== 'off'
? 'flash'
: 'flash-off'
}
size={27}
color="white"
style={styles.cameraSettingsButton}
/>
</TouchableOpacity>
</View>
<CameraButton
style={{
...styles.floatingPlusCont,
left: width / 2 - 45,
}}
onLongPress={beginRecording}
onEndPress={endRecording}
onTap={takePictureHandler}
/>
</View>
</Camera>
)}
</Reanimated.View>
</PinchGestureHandler>
)
}
// Static styles for CameraScreen. Several entries (camera, modal,
// takenImage, bottomCont, bottomButtonsCont, loadingView) are not
// referenced in the visible JSX — presumably used by modals rendered
// elsewhere; verify before removing.
const styles = StyleSheet.create({
container: {
flex: 1,
justifyContent: 'flex-start',
},
contentContainer: {
flex: 1,
position: 'absolute',
right: 0,
left: 0,
},
camera: {
flex: 1,
flexDirection: 'row',
},
topLeftCont: {
position: 'absolute',
width: 45,
top: 0,
right: 10,
borderRadius: 20,
backgroundColor: 'rgba(184,184,184,0.42)',
alignItems: 'center',
justifyContent: 'space-between',
// flexDirection: 'row',
padding: 5,
},
flipIcon: {
marginVertical: 7,
transform: [
{
rotate: '90deg',
},
],
},
cameraSettingsButton: { marginVertical: 7 },
modal: {
flex: 1,
position: 'absolute',
top: 0,
right: 0,
left: 0,
bottom: 0,
},
takenImage: { flex: 1 },
bottomCont: {
flex: 1,
justifyContent: 'flex-end',
padding: 10,
},
bottomButtonsCont: {
width: '100%',
justifyContent: 'space-between',
flexDirection: 'row',
paddingHorizontal: 5,
},
floatingPlusCont: {
bottom: 25,
position: 'absolute',
width: 90,
height: 90,
borderRadius: 45,
},
loadingView: {
backgroundColor: 'rgba(0,0,0,0.4)',
justifyContent: 'center',
alignItems: 'center',
},
})
export default CameraScreen
I have created a simple PanGestureHandler and want it to start from previous position when the gesture begins. I am setting the translationY value to the offsetY value when the gesture ends, which works perfectly and when the gesture begins I'm setting the sum of offsetY(which is the previous translationY) and the translationY to translationY, which on debugging shows the correct translation value. But that doesn't reflect on the View. Since I'm new to reanimated I don't know why that happens. I also couldn't find much resources on the implementation of gesture handlers using functional components.
Any ideas on how to fix this?
My Code:
import React from 'react'
import { Dimensions, Text } from 'react-native'
import { PanGestureHandler, State } from 'react-native-gesture-handler'
import Animated, { add, block, cond, debug, eq, event, Extrapolate, interpolate, set, useCode, useValue } from 'react-native-reanimated';
const {height,width}=Dimensions.get("window")
export default function Pan() {
const translationY = useValue(0)
const offsetY = useValue(0)
const gestureState = useValue(State.UNDETERMINED)
const onGestureEvent = event([{
nativeEvent: {
translationY,
state: gestureState
},
}], { useNativeDriver: true });
useCode(() => block([
cond(eq(gestureState, State.BEGAN), [set(translationY, add(translationY, offsetY)),debug('offsetY', translationY)]),
cond(eq(gestureState, State.END), [set(offsetY, translationY), debug('translateY', offsetY)])
]), [])
const translateY = translationY
return (
<PanGestureHandler {...{onGestureEvent}} onHandlerStateChange={onGestureEvent}>
<Animated.View style={{ height: height * 45 / 100, backgroundColor:'red', width: width, transform: [{ translateY }] }}>
<Text>PanGesture</Text>
</Animated.View>
</PanGestureHandler>
)
}
import React from "react";
import {Dimensions, Text, View} from "react-native";
import {PanGestureHandler} from "react-native-gesture-handler";
import Animated, {
Extrapolate,
useSharedValue,
useAnimatedGestureHandler,
interpolate,
useAnimatedStyle,
} from "react-native-reanimated";
const {height, width} = Dimensions.get("window");
// Pannable red box (reanimated v2): the gesture context remembers where the
// box was when the finger went down, so each drag resumes from the previous
// position.
const Test: React.FC = () => {
  // Total vertical translation, persisted across gestures.
  const translationY = useSharedValue(0);

  const onGestureEvent = useAnimatedGestureHandler(
    {
      onStart: (_, context) => {
        // Snapshot the position at gesture start.
        context.y = translationY.value;
      },
      onActive: (gesture, context) => {
        // Live translation on top of the snapshot.
        translationY.value = context.y + gesture.translationY;
      },
    },
    [translationY.value],
  );

  const animatedStyles = useAnimatedStyle(() => {
    // Clamp so the box cannot be dragged above the top or past the bottom.
    const limit = height - (height * 45) / 100;
    const translateY = interpolate(
      translationY.value,
      [0, limit],
      [0, limit],
      Extrapolate.CLAMP,
    );
    return {
      height: (height * 45) / 100,
      backgroundColor: "red",
      width,
      transform: [{translateY}],
    };
  }, [translationY.value]);

  return (
    <View style={{height, width, backgroundColor: "yellow"}}>
      <PanGestureHandler {...{onGestureEvent}}>
        <Animated.View style={animatedStyles}>
          <Text>PanGesture</Text>
        </Animated.View>
      </PanGestureHandler>
    </View>
  );
};
export default Test;
I'm new to React and I'm trying to make a draggable background, but no matter how hard I try, nothing happens. I found some code using jQuery, but there is plenty of advice that using jQuery in React is bad practice.
Maybe I am doing something wrong.
Thanks in advance
Here's my React code
import React from "react";
import "../styles/board.css";
class Board extends React.Component {
constructor(props) {
super(props);
this.state = { mouseCliked: 0, startX: 0, startY: 0 };
}
mouseDown(e) {
this.setState({ mouseCliked: 1, startX: e.clientX, startY: e.clientY });
}
mouseUp(e) {
this.setState({ mouseCliked: 0, startX: e.clientX, startY: e.clientY });
}
mouseMove = (e) => {
let newPosY = e.clientY - this.stateY;
let newPosX = e.clientX - this.stateX;
if (this.state.mouseClicked) {
e.target.style.backgroundPositionX += newPosX;
e.target.style.backgroundPositionY += newPosY;
}
};
render() {
return (
<div
onMouseMove={this.mouseMove.bind(this)}
onMouseUp={this.mouseUp.bind(this)}
onMouseDown={this.mouseDown.bind(this)}
className="background-image"
>
</div>
);
}
}
export default Board;
CSS:
/* Draggable background for the Board component. The original snippet was
   missing its opening selector line, leaving the closing brace unbalanced;
   the class name matches Board's className="background-image". */
.background-image {
  width: 300px;
  height: 300px;
  background-size: 1000px;
  background-position-x: 0;
  background-position-y: 0;
  background-image: url('https://images.unsplash.com/photo-1452723312111-3a7d0db0e024?crop=entropy&dpr=2&fit=crop&fm=jpg&h=750&ixjsv=2.1.0&ixlib=rb-0.3.5&q=50&w=1450.jpg');
}
I have encountered this problem and also checked "jQuery - drag div css background".
Finally I came up with this solution, and it seemed to work fine.
// Draggable CSS background image implemented with hooks: remember where the
// drag started (cursor + background offset), then set an absolute
// background-position on every mouse move.
const imageStyleInitialValue = {
  backgroundImage: "",
  backgroundPosition: "0px 0px",
  backgroundSize: "0 0",
  height: 0,
  width: 0,
};
const [startPoint, setStartPoint] = useState({x: 0, y: 0});
const [dragging, setDragging] = useState(false);
const [imageStartPos, setImageStartPos] = useState([0, 0]);
const [imageStyles, setImageStyles] = useState<ImageStyles>(imageStyleInitialValue);
// add onMouseMove={handleDragImage} to the image component
const handleDragImage = (e) => {
  if (dragging) {
    const deltaX = e.clientX - startPoint.x;
    const deltaY = e.clientY - startPoint.y;
    // BUG FIX: CSS lengths need units — the original emitted bare numbers
    // ("12 34"), which is an invalid background-position value.
    setImageStyles({...imageStyles,
      backgroundPosition:
        `${imageStartPos[0] + deltaX}px ${imageStartPos[1] + deltaY}px`
    })
  }
};
// add onMouseDown={handleStartDragImage} to the image component
const handleStartDragImage = (e) => {
  setDragging(true);
  // BUG FIX: parseInt tolerates the "px" suffix; Number("12px") is NaN.
  const backgroundPosArray = imageStyles.backgroundPosition
    .split(" ")
    .map(value => parseInt(value, 10) || 0);
  setImageStartPos(backgroundPosArray);
  setStartPoint({x: e.clientX, y: e.clientY});
}
// add onMouseUp={handleEndDragImage} to the top component because you want
// to set Dragging to false when the dragging ends outside of the image
const handleEndDragImage = (e) => {
  setDragging(false)
};