I'm getting a weird error when I try to open a post in my app that contains photos inside an HTML WebView. The app is a React Native app and it retrieves posts from a WordPress backend.
https://i.imgur.com/738mF4Y.png
I changed the view to inline with no success.
My WebView index.js:
/** @format */
import React, { PureComponent } from 'react'
import PropTypes from 'prop-types'
import {
View,
Image,
Dimensions,
Linking,
ActivityIndicator,
WebView,
WebBrowser,
} from 'react-native'
import HTML from 'react-native-render-html'
import { Tools, Constants, error, warn, Config } from '@common'
import sanitizeHtml from 'sanitize-html'
const { height: PageHeight, width: PageWidth } = Dimensions.get('window')
export default class Index extends PureComponent {
static propTypes = {
html: PropTypes.any,
}
constructor(props) {
super(props)
this.state = { fontSize: Constants.fontText.size }
}
async componentWillMount() {
const fontSize = await Tools.getFontSizePostDetail()
this.setState({
fontSize,
})
}
onLinkPress = (url) => {
if (typeof WebBrowser !== 'undefined') {
WebBrowser.openBrowserAsync(url)
} else {
Linking.canOpenURL(url)
.then((supported) => {
if (supported) {
return Linking.openURL(url)
}
})
.catch((err) => error('An error occurred', err))
}
}
render() {
const htmlContent = Config.EnableSanitizeHtml ? sanitizeHtml(this.props.html, {
allowedTags: [ 'b', 'p', 'i', 'img', 'em', 'strong', 'a' ],
allowedAttributes: {
'a': [ 'href' ],
'img' : ['src', 'alt', 'width', 'height']
},
allowedIframeHostnames: ['www.youtube.com']
}) : this.props.html
const fontSize = this.state.fontSize
? this.state.fontSize
: Constants.fontText.size
const tagsStyles = {
a: { color: '#333', fontSize },
strong: { color: '#333', fontSize, fontWeight: '700' },
p: { color: '#333', marginBottom: 5, fontSize, lineHeight: 24 },
em: { fontStyle: 'italic', fontSize },
video: { marginBottom: 5 },
img: { resizeMode: 'cover' },
ul: { color: '#333' },
li: { color: '#333' },
}
const renderers = {
img: (htmlAttribs, children, convertedCSSStyles, passProps) => {
const { src, width, height } = htmlAttribs
if (!src) {
return false
}
const newWidth = Dimensions.get('window').width - 20
const newHeight = height * newWidth / width
return (
<Image
key={passProps.key}
source={{ uri: src }}
style={{
width: newWidth,
height: newHeight,
resizeMode: 'contain',
}}
/>
)
},
iframe: (htmlAttribs, children, convertedCSSStyles, passProps) => {
if (htmlAttribs.src) {
const newWidth = PageWidth
const width = htmlAttribs.width
const height = htmlAttribs.height
const newHeight = height > 0 ? height * newWidth / width : width * 0.7
const url = htmlAttribs.src
return (
<WebView
renderLoading={() => <ActivityIndicator animating size="large" />}
originWhitelist={['*']}
canOpenURL={true}
key={`webview-${passProps.key}`}
source={{ uri: url }}
allowsInlineMediaPlayback={true}
mediaPlaybackRequiresUserAction={true}
javaScriptEnabled
scrollEnabled={false}
automaticallyAdjustContentInsets
style={{
width: PageWidth,
left: -12,
height: newHeight + 15,
}}
/>
)
}
},
}
// warn(['content:', htmlContent])
return (
<View style={{ padding: 12 }}>
<HTML
canOpenURL={true}
html={Constants.RTL ? `<div style="text-align: left;">${htmlContent}</div>` : htmlContent}
ignoredStyles={['font-family']}
renderers={renderers}
imagesMaxWidth={PageWidth}
tagsStyles={tagsStyles}
onLinkPress={(evt, href) => this.onLinkPress(href)}
staticContentMaxWidth={PageWidth}
/>
</View>
)
}
}
I am getting this error only with posts that have images inside the WebView.
I fixed this issue by using the prop allowedStyles={[]} on the component.
@Khuser Some of the web CSS styles are not supported by the HTML renderer in this dependency, which is why you are seeing this error.
Please check the CSS styles in the HTML content and then ignore the ones that are not supported, like below, by using the prop:
ignoredStyles={['font-family', 'display']}
These styles will then be ignored.
As mentioned in the previous answer, you can also add this prop,
allowedStyles={[]}
This will help you fix the issue.
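For reference, a minimal sketch of how both props could sit on the HTML component from the render() above (the values in ignoredStyles are just examples; adjust them to whatever unsupported CSS shows up in your WordPress content):
<HTML
  html={htmlContent}
  renderers={renderers}
  tagsStyles={tagsStyles}
  imagesMaxWidth={PageWidth}
  ignoredStyles={['font-family', 'display']} // drop unsupported CSS coming from the post HTML
  allowedStyles={[]} // or allow no inline styles at all
  onLinkPress={(evt, href) => this.onLinkPress(href)}
/>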
Related
I am learning the basics of tensorflow.js. I have been able to load a model successfully and extract keypoints from the webcam using BlazePose. However, I am noticing that when I start to draw keypoints to the canvas, there is a lot of flickering. I am assuming this is due to the setInterval that does the rendering, but I'm not sure how to remedy this so that the keypoints are rendered in "real-time". Thanks for the help!
import './App.css';
import React, { useEffect, useRef } from 'react';
import * as poseDetection from '@tensorflow-models/pose-detection';
import '@tensorflow/tfjs-backend-webgl';
import Webcam from "react-webcam";
import { drawPoints } from './utils/drawKeyPoints';
function App() {
const webcamRef = useRef(null);
const canvasRef = useRef(null);
async function initPoseDetection() {
const model = poseDetection.SupportedModels.BlazePose;
const config = {
runtime: 'tfjs',
modelType: 'full',
maxPoses: 1,
}
const detector = await poseDetection.createDetector(model, config)
return detector;
}
async function start(){
const detector = await initPoseDetection();
setInterval(() => {
render(detector)
}, 100)
}
async function render(detector) {
if (
typeof webcamRef.current !== "undefined" &&
webcamRef.current !== null &&
webcamRef.current.video.readyState === 4
) {
// Get Video Properties
const video = webcamRef.current.video;
const videoWidth = webcamRef.current.video.videoWidth;
const videoHeight = webcamRef.current.video.videoHeight;
// Set video width
webcamRef.current.video.width = videoWidth;
webcamRef.current.video.height = videoHeight;
// Set canvas height and width
canvasRef.current.width = videoWidth;
canvasRef.current.height = videoHeight;
const ctx = canvasRef.current.getContext("2d");
const poses = await detector.estimatePoses(video)
drawPoints(ctx, poses[0]["keypoints"])
}
}
start()
return (
<div className="App">
<header className="App-header">
<Webcam
ref={webcamRef}
style={{
position: "absolute",
marginLeft: "auto",
marginRight: "auto",
left: 0,
right: 0,
textAlign: "center",
zindex: 9,
width: 1280,
height: 720,
}}
>
</Webcam>
<canvas
ref={canvasRef}
style={{
position: "absolute",
marginLeft: "auto",
marginRight: "auto",
left: 0,
right: 0,
textAlign: "center",
zindex: 9,
width: 1280,
height: 720,
}}
/>
</header>
</div>
);
}
export default App;
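Not an answer from the original thread, but as a sketch of the remedy the question hints at: one common pattern is to drive the drawing with requestAnimationFrame instead of a fixed setInterval, and to clear the canvas before each draw so stale keypoints don't linger. Roughly, reusing the initPoseDetection(), render() and canvasRef from the code above:
async function start() {
  const detector = await initPoseDetection();
  // schedule the next frame only after the current one has been drawn
  async function loop() {
    await render(detector);
    requestAnimationFrame(loop);
  }
  loop();
}

// and inside render(), before drawPoints(...):
// ctx.clearRect(0, 0, canvasRef.current.width, canvasRef.current.height);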
Hello all,
I have looked at a lot of other posts related to this issue, but my question isn't simply how to set the correct preview dimensions based on the aspect ratio chosen. I'm trying to figure out how Snapchat is able to consistently show a full-screen camera preview expanding all the way from the status bar to the bottom of the screen. I've used const ratios = await cameraRef.current.getSupportedRatiosAsync() to get the aspect ratios available for my test device, and the highest one is 16:9, which doesn't give me enough height for a preview that covers the entire screen without looking distorted and out of the ordinary. In the Android OS camera app there's an aspect ratio called 'full' which takes all the real estate available on the screen to show the camera preview, but the aspect ratios getSupportedRatiosAsync() returns do not include this 'full' ratio. I'm trying to achieve this flush look where the camera preview takes the entire screen height available and does not distort, but I end up with a black bar at the bottom because I have to take the camera's aspect ratio into account and set the height of the preview accordingly. Is there something I'm missing here? When I simply put flex: 1 on the camera preview it takes over the screen, but it ends up being a distorted preview because it's not fitting the aspect ratio of the camera.
import React, { useState, useRef, useEffect } from 'react'
import {
View,
Text,
StyleSheet,
Dimensions,
Modal,
ImageBackground,
Animated,
BackHandler,
Platform,
} from 'react-native'
import { TouchableOpacity } from 'react-native-gesture-handler'
//expo camera
import { Camera } from 'expo-camera'
//expo AV
import { Video, AVPlaybackStatus } from 'expo-av'
//custom components
import HeaderX from '../components/HeaderX'
import CameraButton from '../components/CameraButton'
//ionicons
import { Entypo, Ionicons } from '@expo/vector-icons'
import { Icon } from 'react-native-elements'
//ionicons
//colors
import colors from '../constants/colors'
//safe area
import { useSafeAreaInsets } from 'react-native-safe-area-context'
//redux
import { takePicture } from '../store/camera/actions'
import { useDispatch, useSelector } from 'react-redux'
// MediaLibrary
import * as MediaLibrary from 'expo-media-library'
//gesture handlers
import {
PinchGestureHandler,
PinchGestureHandlerGestureEvent,
State,
TapGestureHandler,
TapGestureHandlerStateChangeEvent,
} from 'react-native-gesture-handler'
import Reanimated, {
Extrapolate,
interpolate,
useAnimatedGestureHandler,
useAnimatedProps,
useSharedValue,
useAnimatedStyle,
runOnJS,
runOnUI,
} from 'react-native-reanimated'
//nav 5
import { useFocusEffect } from '@react-navigation/native'
//status bar
import { StatusBar } from 'expo-status-bar'
const { height, width } = Dimensions.get('window')
console.log('🚀 ~ file: CameraScreen.js ~ line 68 ~ height', height)
console.log('🚀 ~ file: CameraScreen.js ~ line 68 ~ width', width)
const CameraScreen = ({ navigation, route }) => {
let checkMarkSet = null
if (route.params) {
checkMarkSet = true
}
// RATIO SETTER
const [imagePadding, setImagePadding] = useState(0)
const [ratio, setRatio] = useState('4:3') // default is 4:3
const screenRatio = height / width
const [isRatioSet, setIsRatioSet] = useState(false)
async function prepareRatio() {
let desiredRatio = '4:3' // Start with the system default
// This issue only affects Android
if (Platform.OS === 'android') {
const ratios = await cameraRef.current.getSupportedRatiosAsync()
let distances = {}
let realRatios = {}
let minDistance = null
for (const ratio of ratios) {
const parts = ratio.split(':')
const ratioHeight = parseInt(parts[0])
const ratioWidth = parseInt(parts[1])
const realRatio = ratioHeight / ratioWidth
realRatios[ratio] = realRatio
// ratio can't be taller than screen, so we don't want an abs()
const distance = screenRatio - realRatio
distances[ratio] = realRatio
if (minDistance == null) {
minDistance = ratio
} else {
if (distance >= 0 && distance < distances[minDistance]) {
minDistance = ratio
}
}
}
// set the best match
desiredRatio = minDistance
// calculate the difference between the camera width and the screen height
const remainder = Math.floor(
(height - realRatios[desiredRatio] * width) / 2
)
// set the preview padding and preview ratio
setImagePadding(remainder / 2)
console.log(`okay look ${remainder / 2}`)
setRatio(desiredRatio)
// Set a flag so we don't do this
// calculation each time the screen refreshes
setIsRatioSet(true)
}
}
const setCameraReady = async () => {
if (!isRatioSet) {
await prepareRatio()
}
}
// RATIO SETTER
const [type, setType] = useState(Camera.Constants.Type.back)
const [activateCamera, setActivateCamera] = useState(false)
const [video, setVideo] = useState('')
const [showVideoModal, setShowVideoModal] = useState(false)
const insets = useSafeAreaInsets()
useFocusEffect(() => {
if (navigation.isFocused()) {
setActivateCamera(true)
}
})
const [pic, setPic] = useState(null)
const [showModal, setShowModal] = useState(false)
const cameraRef = useRef()
const dispatch = useDispatch()
const [zooming, setZooming] = useState(0)
//camera settings
const [flashMode, setFlashMode] = useState('off')
// const picTaken = useSelector((state) => state.cameraReducer.pictureUri)
// console.log(
// '🚀 ~ file: CameraScreen.js ~ line 36 ~ CameraScreen ~ picTaken',
// picTaken
// )
// camera Functions
async function takePictureHandler() {
try {
if (cameraRef.current) {
const options = {
quality: 0.5,
base64: true,
skipProcessing: true,
}
let photo = await cameraRef.current.takePictureAsync(options)
setPic(photo.uri)
dispatch(takePicture(photo.uri))
setShowModal(true)
}
} catch (err) {
console.log(err)
}
// setPickedImage(image.uri)
// props.onImageTaken(image.uri)
}
function flipCameraHandler() {
setType(
type === Camera.Constants.Type.back
? Camera.Constants.Type.front
: Camera.Constants.Type.back
)
}
function flashSwitchHandler() {
if (flashMode === 'off') {
setFlashMode('on')
}
if (flashMode === 'on') {
setFlashMode('off')
}
}
async function savePictureLocallyHandler(localUri) {
const { status } = await MediaLibrary.getPermissionsAsync()
if (status === 'undetermined') {
const { status } = await MediaLibrary.requestPermissionsAsync()
if (status === 'granted') {
const asset = await MediaLibrary.createAssetAsync(localUri)
}
}
if (status === 'granted') {
const asset = await MediaLibrary.createAssetAsync(localUri)
if (asset) {
//display check mark showing it was saved.
}
}
if (status === 'denied') {
console.log('Open settings and give permission')
}
}
// zoom gesture handler
const zoom = useSharedValue(0)
const MAX_ZOOM_FACTOR = 20
const SCALE_FULL_ZOOM = 20
const formatMaxZoom = 1
const maxZoomFactor = Math.min(formatMaxZoom, MAX_ZOOM_FACTOR)
const neutralZoom = 0 // declared before it is used below to avoid a reference error
const neutralZoomScaled = (neutralZoom / maxZoomFactor) * formatMaxZoom
const maxZoomScaled = (1 / formatMaxZoom) * maxZoomFactor
useAnimatedProps(
() => ({
zoom: interpolate(
zoom.value,
[0, neutralZoomScaled, 1],
[0, neutralZoom, maxZoomScaled],
Extrapolate.CLAMP
),
}),
[maxZoomScaled, neutralZoom, neutralZoomScaled, zoom]
)
function updateValue() {
setZooming(zoom.value)
}
function willThisWork() {
'worklet'
runOnJS(updateValue)()
}
const onPinchGesture = useAnimatedGestureHandler({
onStart: (_, context) => {
context.startZoom = zoom.value
},
onActive: (event, context) => {
// trying to map the scale gesture to a linear zoom here
const startZoom = context.startZoom ?? 0
const scale = interpolate(
event.scale,
[1 - 1 / SCALE_FULL_ZOOM, 1, SCALE_FULL_ZOOM],
[-1, 0, 1],
Extrapolate.CLAMP
)
zoom.value = interpolate(
scale,
[-1, 0, 1],
[0, startZoom, 1],
Extrapolate.CLAMP
)
willThisWork()
},
})
// VIDEO RECORDING
async function beginRecording() {
console.log('started')
let video = await cameraRef.current.recordAsync()
setVideo(video)
// setPic(photo.uri)
// dispatch(takePicture(photo.uri))
}
async function endRecording() {
console.log('ended')
cameraRef.current.stopRecording()
setShowVideoModal(true)
}
return (
<View
style={{
...styles.container,
// paddingTop: Platform.OS === 'android' ? insets.top : null,
}}
>
<StatusBar
style=""
translucent
backgroundColor="rgba(255,255,255,0)"
/>
<PinchGestureHandler onGestureEvent={onPinchGesture}>
<Reanimated.View
style={{
flex: 1,
backgroundColor: 'black',
justifyContent: 'flex-start',
paddingBottom: imagePadding * 4,
}}
>
{activateCamera && (
<Camera
style={{
// marginTop: imagePadding,
// marginBottom: imagePadding,
flex: 1,
// height: 733,
}}
ref={cameraRef}
type={type}
flashMode={flashMode}
zoom={zooming}
onCameraReady={setCameraReady}
ratio={ratio}
maxDuration={10000}
autoFocus="on"
>
<View
style={[
styles.contentContainer,
{
paddingTop: insets.top,
paddingBottom: insets.bottom,
top: insets.top,
bottom: insets.bottom,
},
]}
>
<View style={styles.topLeftCont}>
<TouchableOpacity
onPress={flipCameraHandler}
>
<Entypo
name="loop"
size={27}
color="white"
style={styles.flipIcon}
/>
</TouchableOpacity>
<TouchableOpacity
onPress={flashSwitchHandler}
>
<Ionicons
name={
flashMode !== 'off'
? 'flash'
: 'flash-off'
}
size={27}
color="white"
style={styles.cameraSettingsButton}
/>
</TouchableOpacity>
</View>
<CameraButton
style={{
...styles.floatingPlusCont,
left: width / 2 - 45,
}}
onLongPress={beginRecording}
onEndPress={endRecording}
onTap={takePictureHandler}
/>
</View>
</Camera>
)}
</Reanimated.View>
</PinchGestureHandler>
</View>
)
}
const styles = StyleSheet.create({
container: {
flex: 1,
justifyContent: 'flex-start',
},
contentContainer: {
flex: 1,
position: 'absolute',
right: 0,
left: 0,
},
camera: {
flex: 1,
flexDirection: 'row',
},
topLeftCont: {
position: 'absolute',
width: 45,
top: 0,
right: 10,
borderRadius: 20,
backgroundColor: 'rgba(184,184,184,0.42)',
alignItems: 'center',
justifyContent: 'space-between',
// flexDirection: 'row',
padding: 5,
},
flipIcon: {
marginVertical: 7,
transform: [
{
rotate: '90deg',
},
],
},
cameraSettingsButton: { marginVertical: 7 },
modal: {
flex: 1,
position: 'absolute',
top: 0,
right: 0,
left: 0,
bottom: 0,
},
takenImage: { flex: 1 },
bottomCont: {
flex: 1,
justifyContent: 'flex-end',
padding: 10,
},
bottomButtonsCont: {
width: '100%',
justifyContent: 'space-between',
flexDirection: 'row',
paddingHorizontal: 5,
},
floatingPlusCont: {
bottom: 25,
position: 'absolute',
width: 90,
height: 90,
borderRadius: 45,
},
loadingView: {
backgroundColor: 'rgba(0,0,0,0.4)',
justifyContent: 'center',
alignItems: 'center',
},
})
export default CameraScreen
I'm trying to use my useScrollPosition() hook in my main app. It's used to adjust the container's height depending on the scroll position.
Problem: when I scroll down, useScrollPosition() isn't being called.
Here's the working example: https://codesandbox.io/s/recursing-khorana-5yffl?file=/src/App.js
Please help me get my useScrollPosition() hook to work again. I have a feeling it is due to my fixed-position styling, or the way I'm attaching the event listener.
My code:
import React, { useEffect, useState } from "react";
const useWindowSize = () => {
const [windowSize, setWindowSize] = useState({
width: undefined,
height: undefined
});
useEffect(() => {
function handleResize() {
setWindowSize({
width: window.innerWidth,
height: window.innerHeight
});
}
window.addEventListener("resize", handleResize);
handleResize();
return () => window.removeEventListener("resize", handleResize);
}, []);
return windowSize;
};
const useScrollPosition = () => { // error is probably here
const [position, setPosition] = useState({
x: window.scrollX,
y: window.scrollY
});
useEffect(() => {
const handleScroll = () => {
console.log("test");
setPosition({
x: window.scrollX,
y: window.scrollY
});
};
window.addEventListener("scroll", handleScroll);
handleScroll();
return () => window.removeEventListener("scroll", handleScroll);
}, []);
return position;
};
const appContainerStyle = {
position: "fixed",
height: "100%",
width: "100%",
display: "flex",
justifyContent: "center",
alignItems: "flex-end",
backgroundColor: "black",
overflowY: "scroll"
};
export default function App() {
const size = useWindowSize();
const position = useScrollPosition();
const containerStyle = {
position: "relative",
height: size.width <= 1024 && size.height <= 1366 ? "100%" : "50%",
width: "100%"
};
const contentContainerStyle = {
height: position.y <= size.height / 10 ? "90%" : "100%",
width: "100%"
};
const navContainerStyle = {
position: "fixed",
height: position.y <= size.height / 10 ? "10%" : "0%",
width: "100%",
backgroundColor: "rgba(0,0,0,.5)",
zIndex: "1",
top: "0"
};
console.log(position.y, size.height / 10);
return (
<div style={appContainerStyle}>
<div style={navContainerStyle}></div>
<div style={contentContainerStyle}>
<div style={{ ...containerStyle, backgroundColor: "red" }}></div>
<div style={{ ...containerStyle, backgroundColor: "green" }}></div>
<div style={{ ...containerStyle, backgroundColor: "blue" }}></div>
<div style={{ ...containerStyle, backgroundColor: "orange" }}></div>
<div style={{ ...containerStyle, backgroundColor: "purple" }}></div>
</div>
</div>
);
}
You are not scrolling inside the window, but inside the appContainer.
You need to add the event listener to the appContainer. That can be done with a ref:
const myScrollBox = useRef(null);
const position = useScrollPosition( myScrollBox.current );
// ...
<div id="mainContainer" style={ appContainerStyle } ref={ myScrollBox }>
Inside useScrollPosition you need to make sure the component has been rendered already:
const useScrollPosition = ( elementRef ) => { // <-- pass the element ref
const [ position, setPosition ] = useState({ x: 0, y: 0 });
const [ element, setElement ] = useState(null);
// -- set DOM element, if ref is ready and element is not set already
useEffect(() =>{
if( elementRef && !element ){
setElement( elementRef );
}
});
useEffect(() => {
if( !element ){ return; } // <-- skip if element is not ready
const handleScroll = () => {
setPosition({
x: element.scrollLeft, // <-- .scrollLeft instead of .scrollX
y: element.scrollTop // <-- .scrollTop instead of .scrollY
});
};
element.addEventListener("scroll", handleScroll);
return () => { element.removeEventListener('scroll', handleScroll); };
}, [ element ]); // <-- add element to dependencies
return position;
};
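Putting it together, a minimal sketch of how App would pass the ref into the hook (same appContainerStyle and child divs as in the question; note that useRef also needs to be imported from React):
export default function App() {
  const size = useWindowSize();
  const myScrollBox = useRef(null); // ref to the scrolling container
  const position = useScrollPosition(myScrollBox.current);
  // ...same containerStyle / contentContainerStyle / navContainerStyle as before...
  return (
    <div style={appContainerStyle} ref={myScrollBox}>
      {/* nav and content containers as in the question */}
    </div>
  );
}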
I have an array of 3 images and I want to use it as a background image.
import React from 'react'
import Bin1 from './images/bin1.png'
import Bin2 from './images/bin2.png'
import Bin3 from './images/bin3.png'
const array = ['Bin1', 'Bin2', 'Bin3'];
const style = {
height: '20rem',
width: '15rem',
marginRight: '1.5rem',
marginBottom: '1.5rem',
color: 'white',
padding: '1rem',
textAlign: 'center',
fontSize: '1rem',
lineHeight: 'normal',
float: 'left',
backgroundImage : `url(${Bin1})`
}
So instead of one image URL, I want to map all three of them. I am new to React and would really like to know how to solve this.
For Dustbin.jsx you will need to create an object of images, with the image name as the key and the source as the value. Then append backgroundImage to the style object passed to the div. Now, when you create a Dustbin component, just pass the name of the image you want to render as a prop (I called it bgImageName), like this:
Dustbin.jsx
import React from "react";
import { DropTarget } from "react-dnd";
// THIS IS THE IMAGES LIST
const backgroundsList = {
tree:
"https://cdn.pixabay.com/photo/2020/02/17/19/33/tree-4857597_960_720.png",
avocado:
"https://cdn.pixabay.com/photo/2020/05/04/18/55/avocado-5130214_960_720.png",
snowman:
"https://cdn.pixabay.com/photo/2019/12/22/01/14/snowman-4711637_960_720.png"
};
const style = {
height: "12rem",
width: "12rem",
marginRight: "1.5rem",
marginBottom: "1.5rem",
color: "white",
padding: "1rem",
textAlign: "center",
fontSize: "1rem",
lineHeight: "normal",
float: "left",
backgroundSize: "contain" // TO FIT DIV
};
export const Dustbin = ({
accepts,
isOver,
canDrop,
connectDropTarget,
lastDroppedItem,
bgImageName
}) => {
const isActive = isOver && canDrop;
let backgroundColor = "#222";
if (isActive) {
backgroundColor = "darkgreen";
} else if (canDrop) {
backgroundColor = "darkkhaki";
}
let backgroundImage = `url(${backgroundsList[bgImageName]})`; // PASS A PROPERTY CALLED bgImageName WITH THE NAME OF THE IMAGE WE WANT.
return connectDropTarget(
<div style={{ ...style, backgroundColor, backgroundImage }}> {/* APPEND HERE */}
{isActive
? "Release to drop"
: `This dustbin accepts: ${accepts.join(", ")}`}
{lastDroppedItem && (
<p>Last dropped: {JSON.stringify(lastDroppedItem)}</p>
)}
</div>
);
};
export default DropTarget(
(props) => props.accepts,
{
drop(props, monitor) {
props.onDrop(monitor.getItem());
}
},
(connect, monitor) => ({
connectDropTarget: connect.dropTarget(),
isOver: monitor.isOver(),
canDrop: monitor.canDrop()
})
)(Dustbin);
And in Container.jsx, add a background-image property to each dustbin object and pass it to the component, like this:
Container.jsx
import React, { useState, useCallback } from "react";
import { NativeTypes } from "react-dnd-html5-backend";
import Dustbin from "./Dustbin";
import Box from "./Box";
import { ItemTypes } from "./ItemTypes";
import update from "immutability-helper";
export const Container = () => {
// ADD bgImageName TO EACH DUSTBIN OBJECT
const [dustbins, setDustbins] = useState([
{ accepts: [ItemTypes.GLASS], lastDroppedItem: null, bgImageName: "tree" },
{
accepts: [ItemTypes.FOOD],
lastDroppedItem: null,
bgImageName: "avocado"
},
{
accepts: [ItemTypes.PAPER, ItemTypes.GLASS, NativeTypes.URL],
lastDroppedItem: null,
bgImageName: "snowman"
},
{
accepts: [ItemTypes.PAPER, NativeTypes.FILE],
lastDroppedItem: null,
bgImageName: "tree"
}
]);
const [boxes] = useState([
{ name: "Bottle", type: ItemTypes.GLASS },
{ name: "Banana", type: ItemTypes.FOOD },
{ name: "Magazine", type: ItemTypes.PAPER }
]);
const [droppedBoxNames, setDroppedBoxNames] = useState([]);
function isDropped(boxName) {
return droppedBoxNames.indexOf(boxName) > -1;
}
const handleDrop = useCallback(
(index, item) => {
const { name } = item;
setDroppedBoxNames(
update(droppedBoxNames, name ? { $push: [name] } : { $push: [] })
);
setDustbins(
update(dustbins, {
[index]: {
lastDroppedItem: {
$set: item
}
}
})
);
},
[droppedBoxNames, dustbins]
);
return (
<div>
<div style={{ overflow: "hidden", clear: "both" }}>
{dustbins.map(({ accepts, lastDroppedItem, bgImageName }, index) => (
<Dustbin
accepts={accepts}
lastDroppedItem={lastDroppedItem}
onDrop={(item) => handleDrop(index, item)}
key={index}
bgImageName={bgImageName} // DONT FORGET TO PASS bgImageName PROPERTY TO Dustbin COMPONENT
/>
))}
</div>
<div style={{ overflow: "hidden", clear: "both" }}>
{boxes.map(({ name, type }, index) => (
<Box
name={name}
type={type}
isDropped={isDropped(name)}
key={index}
/>
))}
</div>
</div>
);
};
Codesandbox here
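Applied to the question's locally imported images, the same pattern would look roughly like this (a sketch that reuses the Bin1/Bin2/Bin3 imports from the question and the bgImageName prop from the answer above):
import Bin1 from './images/bin1.png'
import Bin2 from './images/bin2.png'
import Bin3 from './images/bin3.png'

// map names to the imported image sources instead of remote URLs
const backgroundsList = {
  bin1: Bin1,
  bin2: Bin2,
  bin3: Bin3,
}

// then, exactly as in Dustbin.jsx above:
// const backgroundImage = `url(${backgroundsList[bgImageName]})`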
I'm working on a React Material-UI component at the moment.
Currently the goal is to be able to change the JSS styling according to some custom function or values in another part of the application, without needing to add or remove classes.
This is my current setup:
stepControl.js
const stepControl = () => {
const [activeStep, setActiveStep] = useState(0)
const [completed, setCompleted] = useState(new Set())
const [stepCount, setStepCount] = useState(0)
const completedSteps = () => {
return completed.size
}
const totalSteps = () => {
return stepCount
}
const allStepsCompleted = () => {
return completedSteps() === totalSteps()
}
return {
state: {
activeStep,
completed
},
actions: {
totalSteps,
completedSteps,
allStepsCompleted,
setActiveStep,
setCompleted,
setStepCount
}
}
}
export default stepControl
index.styles.js
import { makeStyles } from '@tim/functional'
import stepControl from './stepControl.js'
const useStyles = () => {
const { state, actions } = stepControl()
return makeStyles(theme => ({
root: {
width: '70%',
margin: 'auto',
'& .MuiPaper-root': {
backgroundColor: 'transparent'
},
'& .MuiStepConnector-lineHorizontal': {
borderTopWidth: 6,
borderRadius: 1,
marginTop: -2,
borderColor: actions.allStepsCompleted()
? theme.palette.primary
: 'rgba(0, 0, 0, 0.24)'
}
},
button: {
marginRight: theme.spacing(1)
},
backButton: {
marginRight: theme.spacing(1)
},
completed: {
display: 'inline-block',
'& .MuiStepConnector-lineHorizontal': {
borderTopWidth: 6,
borderRadius: 1,
borderColor: theme.palette.primary
}
},
instructions: {
marginTop: theme.spacing(1),
marginBottom: theme.spacing(1)
}
}))
}
export default useStyles
Currently I'm getting a "too many re-renders" error even if I just return a fixed value from the function in question, allStepsCompleted(). How would I go about doing this in Material-UI?
I'm running:
React 16.9
Mui 4.9.5
on a webpack 4.4 server on macOS 10.15
Thanks in advance for any suggestions
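Not part of the thread, but for reference: the usual Material-UI v4 pattern for style values that depend on component state is to create makeStyles once at module scope and feed the dynamic values in as props when the hook is called, rather than wrapping makeStyles in another function. A rough sketch, assuming the allStepsCompleted() value from stepControl() above:
// index.styles.js -- a sketch, not the original file
import { makeStyles } from '@material-ui/core/styles'

const useStyles = makeStyles(theme => ({
  root: props => ({
    width: '70%',
    margin: 'auto',
    '& .MuiStepConnector-lineHorizontal': {
      borderTopWidth: 6,
      borderRadius: 1,
      marginTop: -2,
      borderColor: props.allCompleted
        ? theme.palette.primary.main
        : 'rgba(0, 0, 0, 0.24)',
    },
  }),
}))

// in the component:
// const { actions } = stepControl()
// const classes = useStyles({ allCompleted: actions.allStepsCompleted() })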