# Set up the request for the Computer Vision 'analyze' endpoint.
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {'visualFeatures':'Categories,Description,Color'}
data = {'url':image_url}
response = requests.post(analyze_url, headers=headers, params=params, 
                         json=data)
                         
analysis = response.json()
analysis  # inspect the parsed analysis result
analysis['description']['captions'][0]['text']
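Each entry in `captions` also carries a confidence score; a minimal sketch for printing caption and confidence together, assuming the response shape produced by the Analyze call above:

# Print every generated caption with its confidence score.
for caption in analysis['description']['captions']:
    print(f"{caption['text']} (confidence: {caption['confidence']:.3f})")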
Object Detection

The source code below performs object detection against the 'detect' endpoint.
objectDetection_url = vision_base_url + 'detect'
object_image = 'http://tong.visitkorea.or.kr/cms/resource_etc/67/2597467_image_1.jpg'
img = Image.open(BytesIO(requests.get(object_image).content))
img
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
data = {'url':object_image}
response = requests.post(objectDetection_url,
                         headers=headers,
                         json=data)
detectionResult = response.json()
detectionResult
{'objects': [{'rectangle': {'x': 11, 'y': 38, 'w': 177, 'h': 115},
   'object': 'shuttle bus',
   'confidence': 0.748,
   'parent': {'object': 'bus',
    'confidence': 0.762,
    'parent': {'object': 'Land vehicle',
     'confidence': 0.778,
     'parent': {'object': 'Vehicle', 'confidence': 0.779}}}}],
 'requestId': '8eb5ffed-87f5-4c7d-867c-923f659a7ed9',
 'metadata': {'height': 166, 'width': 250, 'format': 'Jpeg'}}
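The `objects` list nests increasingly general categories under `parent`. A small sketch, assuming the response structure shown above, that walks the chain from the most specific label to the most general:

# Walk the parent chain of the first detected object,
# printing each label with its confidence (most specific first).
node = detectionResult['objects'][0]
while node is not None:
    print(node['object'], node['confidence'])
    node = node.get('parent')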
from PIL import Image, ImageDraw, ImageFont
draw = ImageDraw.Draw(img)
objects = detectionResult['objects']
for obj in objects:
    print(obj)
    
    rect = obj['rectangle']
    
    x = rect['x']
    y = rect['y']
    w = rect['w']
    h = rect['h']
    
    draw.rectangle(((x, y), (x + w, y + h)), outline='red')
{'rectangle': {'x': 11, 'y': 38, 'w': 177, 'h': 115}, 'object': 'shuttle bus', 'confidence': 0.748, 'parent': {'object': 'bus', 'confidence': 0.762, 'parent': {'object': 'Land vehicle', 'confidence': 0.778, 'parent': {'object': 'Vehicle', 'confidence': 0.779}}}}
img
Below is the code that draws labels on the image after object detection.
draw = ImageDraw.Draw(img)
objects = detectionResult['objects']
for obj in objects:
    print(obj)
    rect = obj['rectangle']
    
    x = rect['x']
    y = rect['y']
    w = rect['w']
    h = rect['h']
    
    # Draw the bounding rectangle
    draw.rectangle(((x, y), (x + w, y + h)), outline='red')
    
    # Write the object label
    objectName = obj['object']
    draw.text((x,y), objectName, fill='red')
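The default PIL font is quite small, which makes labels hard to read. ImageFont is already imported; below is a hedged sketch that uses a TrueType font instead, where 'DejaVuSans.ttf' is an assumed font file and may need to be replaced with a .ttf available on your system:

# Optional: draw larger labels with a TrueType font.
# 'DejaVuSans.ttf' is an assumption; substitute any .ttf file available locally.
try:
    font = ImageFont.truetype('DejaVuSans.ttf', 16)
except OSError:
    font = ImageFont.load_default()  # fall back to PIL's built-in bitmap font
for obj in objects:
    rect = obj['rectangle']
    x, y = rect['x'], rect['y']
    draw.rectangle(((x, y), (x + rect['w'], y + rect['h'])), outline='red')
    draw.text((x, y), obj['object'], fill='red', font=font)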
Face API Sample
import requests
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
Set the subscription key and the service endpoint URL.
subscription_key = '<provided key>'
faceDetection_url = '<provided endpoint URL>'
Check the image that will be used for the analysis.
image_url = 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT3c1OLAzAOAGz4lgqGHqDrhSe-4Ex-RnNp0g&usqp=CAU'
img = Image.open(BytesIO(requests.get(image_url).content))
img
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {
    'returnFaceId': 'true',
    'returnFaceAttributes': 'age,gender,emotion'
}
data = {'url': image_url}
Call the service and check the result.
response = requests.post(faceDetection_url,
                        headers=headers,
                        params=params,
                        json=data)
faces = response.json()
  
faces
[{'faceId': 'c680bcc2-444e-47f6-82c2-1ba145eb9496',
  'faceRectangle': {'top': 64, 'left': 102, 'width': 87, 'height': 87},
  'faceAttributes': {'gender': 'female',
   'age': 8.0,
   'emotion': {'anger': 0.0,
    'contempt': 0.0,
    'disgust': 0.0,
    'fear': 0.0,
    'happiness': 0.997,
    'neutral': 0.0,
    'sadness': 0.0,
    'surprise': 0.003}}}]
draw = ImageDraw.Draw(img)
  
for face in faces:
    rect = face['faceRectangle']
    left = rect['left']
    top = rect['top']
    width = rect['width']
    height = rect['height']
    # Draw the face bounding box
    draw.rectangle(((left, top), (left + width, top + height)), outline='red')
    face_info = face['faceAttributes']
    emotion = face_info['emotion']
    happiness = emotion['happiness']
    gender = face_info['gender']
    # Label the face with gender and happiness score (as a percentage)
    result = 'Gender:' + gender + ' happiness:' + str(happiness * 100)
    draw.text((left, top), result, fill='red')
img
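The loop above only reports happiness; the emotion dictionary can also be reduced to its strongest entry. A minimal sketch, assuming the response structure shown earlier:

# Report the dominant emotion for each detected face.
for face in faces:
    emotion = face['faceAttributes']['emotion']
    dominant = max(emotion, key=emotion.get)  # emotion with the highest score
    print(face['faceId'], dominant, emotion[dominant])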
import requests
from PIL import Image
from io import BytesIO
import matplotlib.pyplot as plt
subscription_key = '<provided key>'
vision_base_url = 'https://daeguaivision00.cognitiveservices.azure.com/vision/v2.0/'
ocr_url = vision_base_url + 'ocr'
    
# Set the image to be analyzed.
    
image_url = 'https://upload.wikimedia.org/wikipedia/commons/thumb/a/af/Atomist_quote_from_Democritus.png/338px-Atomist_quote_from_Democritus.png'
img = Image.open(BytesIO(requests.get(image_url).content))
img
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params  = {'language': 'unk', 'detectOrientation': 'true'}  # 'unk' = let the service auto-detect the language
data    = {'url': image_url}
response = requests.post(ocr_url,
                        headers=headers,
                        params=params,
                        json=data)
analysis = response.json()
analysis
# Collect every recognized word from the nested regions/lines structure.
line_infos = [region["lines"] for region in analysis["regions"]]
word_infos = []
for line in line_infos:
    for word_metadata in line:
        for word_info in word_metadata["words"]:
            word_infos.append(word_info)
word_infos
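With the words collected, the recognized text can be overlaid on the image using matplotlib (already imported as plt). A sketch, assuming each word_info carries a 'boundingBox' string of the form 'x,y,w,h' and a 'text' field, as in the OCR v2.0 response above:

# Overlay the recognized words and their bounding boxes on the image.
from matplotlib.patches import Rectangle

plt.figure(figsize=(5, 5))
ax = plt.gca()
ax.imshow(img)
for word_info in word_infos:
    x, y, w, h = map(int, word_info['boundingBox'].split(','))
    ax.add_patch(Rectangle((x, y), w, h, fill=False, edgecolor='red'))
    ax.text(x, y - 2, word_info['text'], color='red', fontsize=8)
plt.axis('off')
plt.show()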