@Jose-goorm Then is there any way I can check the images?
Posts made by 최해선
-
RE: Question about a window to view Python/TensorFlow images directly in goormIDE
-
Question about a window to view Python/TensorFlow images directly in goormIDE
I am currently following the TensorFlow Object Detection tutorial. In Colab the images show up right away, but when I run it in goormIDE the job either fails (the process shows as "killed") or, even when it does run, I cannot see the resulting output.

```python
# For running inference on the TF-Hub module.
import tensorflow as tf
import tensorflow_hub as hub
# For downloading the image.
import matplotlib.pyplot as plt
import tempfile
from six.moves.urllib.request import urlopen
from six import BytesIO

# For drawing onto the image.
import numpy as np
from PIL import Image
from PIL import ImageColor
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageOps

# For measuring the inference time.
import time

# Print Tensorflow version
print(tf.__version__)

# Check available GPU devices.
print("The following GPU devices are available: %s" % tf.test.gpu_device_name())
def display_image(image):
  # fig = plt.figure(figsize=(20, 15))
  plt.grid(False)
  plt.imshow(image)


def download_and_resize_image(url, new_width=256, new_height=256,
                              display=False):
  _, filename = tempfile.mkstemp(suffix=".jpg")
  response = urlopen(url)
  image_data = response.read()
  image_data = BytesIO(image_data)
  pil_image = Image.open(image_data)
  pil_image = ImageOps.fit(pil_image, (new_width, new_height),
                           Image.ANTIALIAS)
  pil_image_rgb = pil_image.convert("RGB")
  pil_image_rgb.save(filename, format="JPEG", quality=90)
  print("Image downloaded to %s." % filename)
  if display:
    display_image(pil_image)
  return filename


def draw_bounding_box_on_image(image,
                               ymin,
                               xmin,
                               ymax,
                               xmax,
                               color,
                               font,
                               thickness=4,
                               display_str_list=()):
  """Adds a bounding box to an image."""
  draw = ImageDraw.Draw(image)
  im_width, im_height = image.size
  (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
                                ymin * im_height, ymax * im_height)
  draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
             (left, top)],
            width=thickness,
            fill=color)

  # If the total height of the display strings added to the top of the bounding
  # box exceeds the top of the image, stack the strings below the bounding box
  # instead of above.
  display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
  # Each display_str has a top and bottom margin of 0.05x.
  total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)

  if top > total_display_str_height:
    text_bottom = top
  else:
    text_bottom = top + total_display_str_height
  # Reverse list and print from bottom to top.
  for display_str in display_str_list[::-1]:
    text_width, text_height = font.getsize(display_str)
    margin = np.ceil(0.05 * text_height)
    draw.rectangle([(left, text_bottom - text_height - 2 * margin),
                    (left + text_width, text_bottom)],
                   fill=color)
    draw.text((left + margin, text_bottom - text_height - margin),
              display_str,
              fill="black",
              font=font)
    text_bottom -= text_height - 2 * margin

def draw_boxes(image, boxes, class_names, scores, max_boxes=10, min_score=0.1):
"""Overlay labeled boxes on an image with formatted scores and label names
."""
colors = list(ImageColor.colormap.values())try: font = ImageFont.truetype("/usr/share/fonts/truetype/liberation/Libera\
tionSansNarrow-Regular.ttf", 25)
except IOError:
print("Font not found, using default font.")
font = ImageFont.load_default()for i in range(min(boxes.shape[0], max_boxes)): if scores[i] >= min_score: ymin, xmin, ymax, xmax = tuple(boxes[i]) display_str = "{}: {}%".format(class_names[i].decode("ascii"), int(100 * scores[i])) color = colors[hash(class_names[i]) % len(colors)] image_pil = Image.fromarray(np.uint8(image)).convert("RGB") draw_bounding_box_on_image( image_pil, ymin, xmin, ymax, xmax, color, font, display_str_list=[display_str]) np.copyto(image, np.array(image_pil)) return image
# By Heiko Gorski, Source: https://commons.wikimedia.org/wiki/File:Naxos_Taverna.jpg
image_url = "https://upload.wikimedia.org/wikipedia/commons/6/60/Naxos_Taverna.jpg"
downloaded_image_path = download_and_resize_image(image_url, 1280, 856, True)

module_handle = "https://tfhub.dev/google/faster_rcnn/openimages_v4/inception_resnet_v2/1"

detector = hub.load(module_handle).signatures['default']

def load_img(path):
  img = tf.io.read_file(path)
  img = tf.image.decode_jpeg(img, channels=3)
  return img


def run_detector(detector, path):
  img = load_img(path)

  converted_img = tf.image.convert_image_dtype(img, tf.float32)[tf.newaxis, ...]
  start_time = time.time()
  result = detector(converted_img)
  end_time = time.time()

  result = {key: value.numpy() for key, value in result.items()}

  print("Found %d objects." % len(result["detection_scores"]))
  print("Inference time: ", end_time - start_time)

  image_with_boxes = draw_boxes(
      img.numpy(), result["detection_boxes"],
      result["detection_class_entities"], result["detection_scores"])

  display_image(image_with_boxes)


run_detector(detector, downloaded_image_path)
```
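
goormIDE containers have no display attached, so `plt.imshow()` on its own never opens a window the way Colab renders figures inline. A minimal sketch of one common workaround, assuming matplotlib is installed in the container: switch to the non-interactive Agg backend and write each figure to a PNG that can be opened from the IDE's file tree or image preview. The `save_image` helper and the `detection_result.png` path below are only illustrative names, not part of the tutorial.

```python
# Minimal sketch (assumption: matplotlib is available in the container).
import matplotlib
matplotlib.use("Agg")  # headless backend: render to files instead of a window
import matplotlib.pyplot as plt


def save_image(image, path="detection_result.png"):
    """Write an image array to disk so it can be previewed in the IDE."""
    fig = plt.figure(figsize=(20, 15))
    plt.grid(False)
    plt.imshow(image)
    fig.savefig(path, bbox_inches="tight")
    plt.close(fig)


# e.g. call save_image(image_with_boxes) in run_detector
# instead of display_image(image_with_boxes).
```

The "killed" message is a separate issue: it usually means the process was stopped for running out of memory, and loading the Faster R-CNN OpenImages module can exceed what a small container provides. A lighter detector, such as the SSD/MobileNet variant offered in the same tutorial, or a container with more RAM may be needed before any output image can be produced at all.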