Monday, August 2, 2021

How to control a repeatedly called function in Python and JavaScript

import cv2
import numpy as np
from keras.models import load_model
from statistics import StatisticsError, mode

from utils.datasets import get_labels
from utils.inference import detect_faces
from utils.inference import draw_text
from utils.inference import draw_bounding_box
from utils.inference import apply_offsets
from utils.inference import load_detection_model
from utils.preprocessor import preprocess_input

import asyncio
import websockets

async def accept(websocket, path):
    # note: everything below, including model loading, re-runs on each pass of this loop
    while True:


        USE_WEBCAM = True # If false, loads video file source

        # parameters for loading data and images
        emotion_model_path = '/Users/sejong/Desktop/webos/teample/displayApp/Emotion/models/emotion_model.hdf5'
        emotion_labels = get_labels('fer2013')

        # hyper-parameters for bounding boxes shape
        frame_window = 10
        emotion_offsets = (20, 40)

        # loading models
        face_cascade = cv2.CascadeClassifier('/Users/sejong/Desktop/webos/teample/displayApp/Emotion/models/haarcascade_frontalface_default.xml')
        emotion_classifier = load_model(emotion_model_path)

        # getting input model shapes for inference
        emotion_target_size = emotion_classifier.input_shape[1:3]

        # starting lists for calculating modes
        emotion_window = []

        # starting video streaming

        cv2.namedWindow('window_frame')

        # Select video or webcam feed
        if USE_WEBCAM:
            cap = cv2.VideoCapture(0)  # webcam source
        else:
            cap = cv2.VideoCapture('./demo/dinner.mp4')  # video file source

        while cap.isOpened():
            ret, bgr_image = cap.read()


            gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
            rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)

            faces = face_cascade.detectMultiScale(gray_image, scaleFactor=1.1, minNeighbors=5,
                    minSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)

            for face_coordinates in faces:

                x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
                gray_face = gray_image[y1:y2, x1:x2]
                try:
                    gray_face = cv2.resize(gray_face, emotion_target_size)
                except cv2.error:
                    # skip faces whose crop is empty after applying the offsets
                    continue

                gray_face = preprocess_input(gray_face, True)
                gray_face = np.expand_dims(gray_face, 0)
                gray_face = np.expand_dims(gray_face, -1)
                emotion_prediction = emotion_classifier.predict(gray_face)
                emotion_probability = np.max(emotion_prediction)
                emotion_label_arg = np.argmax(emotion_prediction)
                emotion_text = emotion_labels[emotion_label_arg]
                emotion_window.append(emotion_text)

                if len(emotion_window) > frame_window:
                    emotion_window.pop(0)
                try:
                    emotion_mode = mode(emotion_window)
                except StatisticsError:
                    # no single most-common emotion in the window yet
                    continue

                if emotion_text == 'angry':
                    color = emotion_probability * np.asarray((255, 0, 0))
                elif emotion_text == 'sad':
                    color = emotion_probability * np.asarray((0, 0, 255))
                elif emotion_text == 'happy':
                    color = emotion_probability * np.asarray((255, 255, 0))
                elif emotion_text == 'surprise':
                    color = emotion_probability * np.asarray((0, 255, 255))
                else:
                    color = emotion_probability * np.asarray((0, 255, 0))

                color = color.astype(int)
                color = color.tolist()

                # Earlier attempt (kept as a note): tally the last five
                # predicted labels (angry / sad / happy / neutral), sort the
                # counts, and send only the most frequent emotion over the
                # websocket; it was commented out in favour of sending the
                # label on every frame below.
                await websocket.send(emotion_text)  # !! this sends the detected emotion to the connected page on every frame
                draw_bounding_box(face_coordinates, rgb_image, color)
                draw_text(face_coordinates, rgb_image, emotion_mode,
                        color, 0, -45, 1, 1)

            bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)
            cv2.imshow('window_frame', bgr_image)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        cap.release()
        cv2.destroyAllWindows()
start_server = websockets.serve(accept, 'localhost', 9998)

asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()
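
A common way to keep a repeated function from re-firing on the same value is to remember the last value handled and act only when it changes. Below is a minimal sketch of that pattern on the Python side, reusing the legacy websockets.serve / accept(websocket, path) shape from the script above; FAKE_FEED is a made-up stand-in for the per-frame emotion_mode values and is not part of the original code.

import asyncio
import itertools

import websockets

# Hypothetical stand-in for the per-frame classifier output; in the real
# script this would be the emotion_mode computed inside the capture loop.
FAKE_FEED = itertools.cycle(['happy', 'happy', 'sad', 'sad', 'happy'])

async def accept(websocket, path):
    last_sent = None                     # last label actually delivered
    for emotion in FAKE_FEED:
        if emotion != last_sent:         # send only when the label changes
            await websocket.send(emotion)
            last_sent = emotion
        await asyncio.sleep(0.1)         # stand-in for the per-frame work

start_server = websockets.serve(accept, 'localhost', 9998)
asyncio.get_event_loop().run_until_complete(start_server)
asyncio.get_event_loop().run_forever()

Putting the same "if emotion_mode != last_sent" guard around the websocket.send(...) call in the real loop means each client hears about an emotion exactly once, when it first becomes the dominant label.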
<!DOCTYPE html>
<html>
<head>
<title>Example Web App</title>
<script src="./node_modules/axios/dist/axios.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/2.3.0/socket.io.js"></script>
<style>
    * {margin: 0; padding: 0; border: 0;}
    
    .wrapper {
        background-color: white;
        width: 100vw;
        height: 100vh;
    }
    
    .img {
        width: 100%;
        height: 100%;
    }
</style>
</head>
<body>
    <div id="wrapper" class="wrapper">
        
    </div>
    <script>
        const wrapper = document.getElementById('wrapper');
        // function that renders the image
        const displayImg = (weather) => {
            // create an img tag
            let img = document.createElement('img');
            // give the img tag an id so it can be targeted when the emotion changes
            img.setAttribute('id', 'img')
            // add the class so the img tag picks up the stylesheet
            img.classList.add('img');
            // pick a random file (1-4) from the image folder
            let imgNumber = Math.floor(Math.random() * 4) + 1;
            // take the value from the weather API as the weather parameter and,
            // for each case, append the img tag as a child of the wrapper tag
            let src;
            switch (weather)
            {
                case 'Clear':
                    src = `./images/clear/${imgNumber}.jpg`;
                    img.setAttribute('src', src);
                    break;
                case 'Rain':
                case 'Clouds':
                    src = `./images/cloud/${imgNumber}.jpg`;
                    img.setAttribute('src', src);
                    break;
                case 'Snow':
                    src = `./images/snow/${imgNumber}.jpg`;
                    img.setAttribute('src', src);
                    break;
            }
            wrapper.appendChild(img);
        }
        // fetch the current location's weather data from the weather API
        const getWeather = async (latitude, longitude) => {
            const { data } = await axios.get(`http://api.openweathermap.org/data/2.5/weather?lat=${latitude}&lon=${longitude}&APPID=b3c49cd560fb3defc700f36b7bc8a4a4`);
            console.log(data.weather[0].main);
            displayImg(data.weather[0].main);
        }
        const getLocation = () => {
            let location = 'Jeju Jejusi'
            axios.get('https://maps.googleapis.com/maps/api/geocode/json', {
                params: {
                    address: location,
                    key: ''
                }
            }).then(function(response) {
                console.log(response.data.results[0].geometry.location);
                let latitude = response.data.results[0].geometry.location.lat;
                let longitude = response.data.results[0].geometry.location.lng;
                console.log(latitude);
                console.log(longitude);
                getWeather(latitude, longitude);
            })
        }
        getLocation();

        const webSocket = new WebSocket("ws://localhost:9998");

        webSocket.onopen = function (message) {
            console.log('connect');
        };

        webSocket.onclose = function (message) {
            console.log('disconnect');
        };

        webSocket.onerror = function (message) {
            console.log('err');
        };
        // code that receives the facial-emotion data from the websocket
        webSocket.onmessage = function (message) {
            console.log(message.data);
            // the emotion label arrives as message.data
            const image = document.getElementById('img');
            // grab the img tag that the weather code put on the page
            const imgNumber = Math.floor(Math.random() * 4) + 1;
            let src;
            switch (message.data)
            {
                // change the img tag's src according to the emotion
                case 'happy':
                    src = `./images/clear/${imgNumber}.jpg`;
                    image.src = src;
                    break;
                case 'sad':
                    src = `./images/cloud/${imgNumber}.jpg`;
                    image.src = src;
                    break;
                case 'neutral':
                    // the image folder is spelled "neutual" in this project
                    src = `./images/neutual/${imgNumber}.jpg`;
                    image.src = src;
                    break;
                case 'angry':
                    src = `./images/angry/${imgNumber}.jpg`;
                    image.src = src;
                    break;
            }
        };
    </script>
</body>
</html>

Hi guys! I'm writing code that changes a displayed image in response to the weather and to facial-expression recognition. The weather-driven image change works well, but because the expression recognition runs continuously, the image keeps being swapped on every detected change of emotion. How can I solve this?

The Python code is open source!

Problem: How can I react to a detected facial expression only once, instead of on every frame?
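
One possible fix, sketched below: the same change-detection guard can live on the JavaScript side instead, so the page ignores repeated emotion messages and swaps the image only when the emotion actually changes. This assumes the ws://localhost:9998 socket and the #img element from the page above; updateImage() is a hypothetical stand-in for the switch statement that sets the image source.

// Ignore repeated emotion messages on the client side.
const socket = new WebSocket('ws://localhost:9998');
let lastEmotion = null;                // last emotion we actually reacted to

// Hypothetical helper standing in for the switch statement above.
function updateImage(emotion) {
    const img = document.getElementById('img');
    const imgNumber = Math.floor(Math.random() * 4) + 1;
    if (img) {
        img.src = `./images/${emotion}/${imgNumber}.jpg`;  // assumed folder scheme
    }
}

socket.onmessage = (message) => {
    const emotion = message.data;
    if (emotion === lastEmotion) {
        return;                        // same emotion as last time: do nothing
    }
    lastEmotion = emotion;
    updateImage(emotion);              // runs exactly once per emotion change
};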



