# Day 10 study notes
# (blog header: 송용진 · 2024년 3월 3일)
# %% [markdown]
# MediaPipe를 이용한 라이브러리 탐색
# https://developers.google.com/mediapipe/solutions/guide
# 
# text embedding은 수치화 하는 것
# 라이브러리를 가져다 써보기 장단점?
# 
# 이번주 미션 : 외부 라이브러리를 이용해서 나만의 기능을 구현해 보자
# 
# AI vs AGI
# 페르소나

# %%
# Embedding: words are mapped to numeric vectors; semantically similar
# words end up with nearby vectors. Illustrative example only:
#   사랑 (love)      : [0.2,  0.5, 0.6]
#   호감 (affinity)  : [0.21, 0.4, 0.65]
#   적대 (hostility) : [0.7,  0.8, 0.1]

# %%
import mediapipe as mp
from mediapipe.tasks import python
from mediapipe.tasks.python import vision

import cv2
import numpy as np

# Path to the local EfficientDet-Lite2 TFLite object-detection model.
# Raw string avoids ambiguous backslash escapes in the Windows path
# ('\w', '\d' happen to survive today but trigger invalid-escape warnings).
model_path = r'C:\workspaces\day10\efficientdet_lite2.tflite'

# Drawing parameters used by visualize().
MARGIN = 10  # pixels
ROW_SIZE = 10  # pixels
FONT_SIZE = 1
FONT_THICKNESS = 1
TEXT_COLOR = (0, 255, 0)  # green in BGR (the original comment wrongly said "red")

def visualize(
    image,
    detection_result
) -> np.ndarray:
  """Annotate *image* with every detection's bounding box and label.

  Args:
    image: Input image (numpy array); drawn on in place.
    detection_result: MediaPipe result whose ``detections`` each carry a
      ``bounding_box`` and a ``categories`` list.

  Returns:
    The same image array, with boxes and "<label> (<score>)" text drawn.
  """
  for det in detection_result.detections:
    # Box corners from origin + extent.
    box = det.bounding_box
    top_left = (box.origin_x, box.origin_y)
    bottom_right = (box.origin_x + box.width, box.origin_y + box.height)
    cv2.rectangle(image, top_left, bottom_right, TEXT_COLOR, 3)

    # Label text: top-ranked category plus its score rounded to 2 places.
    best = det.categories[0]
    label = best.category_name + ' (' + str(round(best.score, 2)) + ')'
    anchor = (MARGIN + box.origin_x, MARGIN + ROW_SIZE + box.origin_y)
    cv2.putText(image, label, anchor, cv2.FONT_HERSHEY_PLAIN,
                FONT_SIZE, TEXT_COLOR, FONT_THICKNESS)

  return image

# Aliases into the MediaPipe Tasks object-detection API.
BaseOptions = mp.tasks.BaseOptions
ObjectDetector = mp.tasks.vision.ObjectDetector
ObjectDetectorOptions = mp.tasks.vision.ObjectDetectorOptions
VisionRunningMode = mp.tasks.vision.RunningMode

options = ObjectDetectorOptions(
    base_options=BaseOptions(model_asset_path=model_path),
    max_results=5,
    running_mode=VisionRunningMode.IMAGE)

# Run the detector frame-by-frame over a local video and show annotated frames.
cap = cv2.VideoCapture("dance.mp4")
with ObjectDetector.create_from_options(options) as detector:
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            # End of video (or read failure): stop instead of spinning forever
            # (the original looped infinitely once frames ran out).
            break
        # MediaPipe expects RGB; OpenCV decodes frames as BGR.
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        mp_image = mp.Image(image_format=mp.ImageFormat.SRGB, data=image)
        detection_result = detector.detect(mp_image)
        # Draw on the original BGR frame so imshow colors stay correct.
        annotated_image = visualize(frame, detection_result)
        cv2.imshow('Frame', annotated_image)
        if cv2.waitKey(5) == ord('q'):
            break
cap.release()  # release the capture handle (was missing)
cv2.destroyAllWindows()

# %%
import numpy as np

# Element-wise product of two small integer vectors.
a = np.array([1, 2, 3])
b = np.array([1, 2, 3])
a * b  # same as np.multiply(a, b)

# %%
# Landmark positions are expressed as fractions of the frame size (0..1).
x = 0.5
y = 0.6

# %%
# Multiply the normalized coordinates by the real frame width/height
# to recover pixel coordinates.
np.array([x, y]) * np.array([640, 480])

# %%
import cv2
import mediapipe as mp
import numpy as np
from utils import calculate_angle

def calculate_angle(a, b, c):
    """Return the angle at vertex *b* (degrees, folded into 0–180).

    a, b, c are 2-D points (x, y); the angle is between rays b->a and b->c.
    """
    first = np.array(a)
    vertex = np.array(b)
    last = np.array(c)

    # Signed angle between the two rays, via their polar angles.
    rad = (np.arctan2(last[1] - vertex[1], last[0] - vertex[0])
           - np.arctan2(first[1] - vertex[1], first[0] - vertex[0]))
    deg = np.abs(np.degrees(rad))

    # Fold reflex angles back into the 0–180 range.
    return 360 - deg if deg > 180.0 else deg

mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

# Track the left arm in a video and overlay the elbow angle each frame.
cap = cv2.VideoCapture('dance2.mp4')

with mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5) as pose:
    while cap.isOpened():
        ret, image = cap.read()
        if not ret:
            # End of stream: stop instead of looping forever (bug fix).
            break
        # MediaPipe expects RGB input; OpenCV frames are BGR.
        results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        # Explicit None check replaces the original bare `except: pass`,
        # which silently hid every error (AttributeError, typos, ...).
        if results.pose_landmarks:
            landmarks = results.pose_landmarks.landmark
            # Left shoulder/elbow/wrist as normalized (x, y) pairs.
            shoulder = [landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].x,
                        landmarks[mp_pose.PoseLandmark.LEFT_SHOULDER.value].y]
            elbow = [landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].x,
                     landmarks[mp_pose.PoseLandmark.LEFT_ELBOW.value].y]
            wrist = [landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].x,
                     landmarks[mp_pose.PoseLandmark.LEFT_WRIST.value].y]
            angle = calculate_angle(shoulder, elbow, wrist)
            # Scale the normalized elbow coords by the real frame size
            # (was hard-coded 640x480, wrong for other resolutions).
            h, w = image.shape[:2]
            cv2.putText(image, str(angle),
                        tuple(np.multiply(elbow, [w, h]).astype(int)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 2, cv2.LINE_AA
                        )
        mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS,
                            mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=2),
                            mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=2)
                             )

        cv2.imshow("Image", image)
        if cv2.waitKey(1)==ord('q'):
            break

cap.release()
cv2.destroyAllWindows()

# %%
import cv2
import mediapipe as mp
import time

mp_objectron = mp.solutions.objectron
mp_drawing = mp.solutions.drawing_utils

# 3D object detection (Objectron, 'Shoe' model) over a video, with FPS overlay.
cap = cv2.VideoCapture('shoe.mp4')

with mp_objectron.Objectron(
    static_image_mode = False,
    max_num_objects=2,
    min_detection_confidence=0.5,
    min_tracking_confidence = 0.8,
    model_name='Shoe'
) as objectron:
    while cap.isOpened():
        ret, image = cap.read()
        if not ret:
            # The original never checked `ret`, so cv2.cvtColor(None, ...)
            # crashed at end of video. Stop cleanly instead.
            break
        start = time.time()
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image.flags.writeable = False  # hint: lets MediaPipe avoid a copy
        results = objectron.process(image)

        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        if results.detected_objects:
            for detected_object in results.detected_objects:
                # Projected 2D box corners plus the estimated 3D pose axes.
                mp_drawing.draw_landmarks(image, detected_object.landmarks_2d, mp_objectron.BOX_CONNECTIONS)
                mp_drawing.draw_axis(image, detected_object.rotation, detected_object.translation)

        totalTime = time.time() - start
        # Guard against a zero-duration frame on coarse clocks.
        fps = 1 / totalTime if totalTime > 0 else 0.0
        cv2.putText(image, f'FPS : {int(fps)}', (20, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.5, (0, 255, 0), 2)
        cv2.imshow("MediaPipe Objectron", image)
        if cv2.waitKey(5) & 0xFF ==27:  # ESC key
            break
cap.release()
cv2.destroyAllWindows()
# Dictionary basics: create, delete one key, then empty the whole dict.
information = {"고향":"수원", "취미":"영화관람","좋아하는 음식":"국수"}
del information["좋아하는 음식"]
information.clear() # empty the dictionary

# List basics: append an item, then delete by index.
foods = ["된장찌개", "피자", "제육볶음"]
foods.append("김밥")
del foods[1]

print(information.items())
# NOTE(review): this actually prints dict_items([]) — the dict was cleared
# above. Before clear() it would have printed:
# dict_items([('고향', '수원'), ('취미', '영화관람'), ('좋아하는 음식', '국수')])

# Iterate key/value pairs (prints nothing here since the dict is empty).
for key,value in information.items():
    print(key,value)    
'''
고향 수원
취미 영화관람
좋아하는 음식 국수
'''

# Basic set algebra on two lunch menus.
menu1 = {"된장찌개", "피자", "제육볶음"}
menu2 = {"된장찌개", "떡국", "김밥"}

menu3 = menu1 | menu2  # union
menu4 = menu1 & menu2  # intersection
menu5 = menu1 - menu2  # difference

# To subtract a single string from a set via set difference, first wrap
# the string in a list (a bare string would be split into characters):
#   set_lunch = set(["된장찌개", "피자", "제육볶음", "짜장면"])
#   item = "짜장면"
#   set_lunch - set([item])

import time
# Pause for one second. (Fixes the original typo "tiem.sleep(1)",
# which raised NameError.)
time.sleep(1)

# Subtracting via set difference can be more efficient than removing
# items from a list one by one.

# random.choice works on sequences (lists, tuples, strings) — not on sets.

# (blog export footer residue: "profile / 개발자 / 0개의 댓글" — not code)