diff --git a/detect.py b/detect.py
index 77502b0c5bee..0b1d93897d4c 100644
--- a/detect.py
+++ b/detect.py
@@ -21,9 +21,9 @@
 
 from models.experimental import attempt_load
 from utils.datasets import LoadStreams, LoadImages
-from utils.general import check_img_size, check_requirements, check_imshow, colorstr, non_max_suppression, \
+from utils.general import check_img_size, check_requirements, check_imshow, colorstr, is_ascii, non_max_suppression, \
     apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path, save_one_box
-from utils.plots import colors, Annotator
+from utils.plots import Annotator, colors
 from utils.torch_utils import select_device, load_classifier, time_sync
 
@@ -105,6 +105,7 @@ def wrap_frozen_graph(gd, inputs, outputs):
         output_details = interpreter.get_output_details()  # outputs
         int8 = input_details[0]['dtype'] == np.uint8  # is TFLite quantized uint8 model
     imgsz = check_img_size(imgsz, s=stride)  # check image size
+    ascii = is_ascii(names)  # names are ascii (use PIL for UTF-8)
 
     # Dataloader
     if webcam:
@@ -181,7 +182,7 @@ def wrap_frozen_graph(gd, inputs, outputs):
             s += '%gx%g ' % img.shape[2:]  # print string
             gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
             imc = im0.copy() if save_crop else im0  # for save_crop
-            annotator = Annotator(im0, line_width=line_thickness, pil=False)
+            annotator = Annotator(im0, line_width=line_thickness, pil=not ascii)
             if len(det):
                 # Rescale boxes from img_size to im0 size
                 det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
diff --git a/models/common.py b/models/common.py
index 0c60b39a483d..90bfef5124b3 100644
--- a/models/common.py
+++ b/models/common.py
@@ -18,9 +18,9 @@
 from torch.cuda import amp
 
 from utils.datasets import exif_transpose, letterbox
-from utils.general import colorstr, non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh, \
-    save_one_box
+from utils.general import colorstr, increment_path, is_ascii, make_divisible, non_max_suppression, save_one_box, \
+    scale_coords, xyxy2xywh
-from utils.plots import colors, Annotator
+from utils.plots import Annotator, colors
 from utils.torch_utils import time_sync
 
 LOGGER = logging.getLogger(__name__)
@@ -354,6 +354,7 @@ def __init__(self, imgs, pred, files, times=None, names=None, shape=None):
         self.imgs = imgs  # list of images as numpy arrays
         self.pred = pred  # list of tensors pred[0] = (xyxy, conf, cls)
         self.names = names  # class names
+        self.ascii = is_ascii(names)  # names are ascii (use PIL for UTF-8)
         self.files = files  # image filenames
         self.xyxy = pred  # xyxy pixels
         self.xywh = [xyxy2xywh(x) for x in pred]  # xywh pixels
@@ -371,7 +372,7 @@ def display(self, pprint=False, show=False, save=False, crop=False, render=False
                 n = (pred[:, -1] == c).sum()  # detections per class
                 str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, "  # add to string
             if show or save or render or crop:
-                annotator = Annotator(im, pil=False)
+                annotator = Annotator(im, pil=not self.ascii)
                 for *box, conf, cls in reversed(pred):  # xyxy, confidence, class
                     label = f'{self.names[int(cls)]} {conf:.2f}'
                     if crop:
diff --git a/utils/general.py b/utils/general.py
index fe9a8ac537fb..ba1e4f58cd86 100755
--- a/utils/general.py
+++ b/utils/general.py
@@ -124,7 +124,7 @@ def is_pip():
 
 def is_ascii(s=''):
     # Is string composed of all ASCII (no UTF) characters?
-    s = str(s)  # convert to str() in case of None, etc.
+    s = str(s)  # convert list, tuple, None, etc. to str
    return len(s.encode().decode('ascii', 'ignore')) == len(s)
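
For context, a minimal standalone sketch of the check this diff introduces: `is_ascii()` stringifies its argument and compares lengths after an ASCII round-trip (non-ASCII characters are dropped by `decode('ascii', 'ignore')`, shortening the result), and detect.py / models/common.py negate it (`pil=not ascii`) so non-ASCII class names fall back to PIL, which can render UTF-8 label text. The sample class names below are illustrative, not from the repo.

```python
def is_ascii(s=''):
    # Is string composed of all ASCII (no UTF) characters?
    s = str(s)  # convert list, tuple, None, etc. to str
    return len(s.encode().decode('ascii', 'ignore')) == len(s)

print(is_ascii(['person', 'bicycle']))  # True  -> Annotator(..., pil=False): cv2 drawing path
print(is_ascii(['人', '自行车']))        # False -> Annotator(..., pil=True): PIL renders UTF-8 labels
print(is_ascii(None))                   # True  -> str(None) == 'None' is all-ASCII
```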