Displaying WIDER dataset annotations: stepping through every image of the landmark dataset


This article explains how to display the WIDER dataset images together with their annotations.

For how to display COCO dataset images and inspect annotation quality, see the earlier article "Saving coco dataset annotations as a single file and displaying every image's mask one by one".

Following the data layout of libfacedetection.train (https://github.com/ShiqiYu/libfacedetection.train), let's take a look at the annotation file trainset.json:

$ tree data/widerface
data/widerface
├── eval_tools
├── wider_face_split
├── WIDER_test
├── WIDER_train
├── WIDER_val
└── trainset.json

The file trainset.json is fairly large and takes a while to open directly, so here are a few excerpts to show the format:

{
"images":
[
{"file_name": "./44--Aerobics/44_Aerobics_Aerobics_44_53.jpg", "height": 683, "width": 1024, "id": 0},
{"file_name": "./44--Aerobics/44_Aerobics_Aerobics_44_28.jpg", "height": 736, "width": 1024, "id": 1},
{"file_name": "./44--Aerobics/44_Aerobics_Aerobics_44_591.jpg", "height": 936, "width": 1024, "id": 2},
{"file_name": "./44--Aerobics/44_Aerobics_Aerobics_44_46.jpg", "height": 520, "width": 1024, "id": 3},
{"file_name": "./44--Aerobics/44_Aerobics_Aerobics_44_321.jpg", "height": 1392, "width": 1024, "id": 4},
{"file_name": "./44--Aerobics/44_Aerobics_Aerobics_44_946.jpg", "height": 1001, "width": 1024, "id": 5},
{"file_name": "./44--Aerobics/44_Aerobics_Aerobics_44_307.jpg", "height": 1139, "width": 1024, "id": 6},
{"file_name": "./44--Aerobics/44_Aerobics_Aerobics_44_943.jpg", "height": 1156, "width": 1024, "id": 7},
{"file_name": "./44--Aerobics/44_Aerobics_Aerobics_44_711.jpg", "height": 1656, "width": 1024, "id": 8},
...
{"file_name": "./27--Spa/27_Spa_Spa_27_110.jpg", "height": 307, "width": 1024, "id": 12860},
{"file_name": "./27--Spa/27_Spa_Spa_27_879.jpg", "height": 1024, "width": 1024, "id": 12861},
{"file_name": "./27--Spa/27_Spa_Spa_27_219.jpg", "height": 682, "width": 1024, "id": 12862}
],
"annotations":
[
{"segmentation": [[421.1, 133.2, 445.5, 125.5, 432.6, 145.4, 432.3, 159.9, 450.6, 153.4]], "area": 4356.76, "iscrowd": 0, "image_id": 0, "bbox": [411.5, 100.1, 59.6, 73.1], "category_id": 1, "id": 0, "ignore": 0},
{"segmentation": [[537.6, 120.5, 546.8, 119.1, 541.2, 125.2, 539.9, 130.7, 547.1, 129.5]], "area": 585.66, "iscrowd": 0, "image_id": 0, "bbox": [534.4, 110.9, 22.7, 25.8], "category_id": 1, "id": 1, "ignore": 0},
{"segmentation": [[104.5, 152.8, 116.5, 150.9, 109.9, 158.4, 107.3, 164.1, 117.7, 162.5]], "area": 953.2800000000001, "iscrowd": 0, "image_id": 0, "bbox": [99.3, 139.2, 28.8, 33.1], "category_id": 1, "id": 2, "ignore": 0},
{"segmentation": [[823.0, 102.6, 832.1, 102.6, 826.2, 107.1, 823.6, 112.3, 830.9, 112.2]], "area": 497.96000000000004, "iscrowd": 0, "image_id": 0, "bbox": [819.0, 95.6, 21.1, 23.6], "category_id": 1, "id": 3, "ignore": 0},
{"segmentation": [[955.1, 94.6, 971.3, 96.7, 961.0, 106.2, 954.8, 111.8, 966.8, 113.3]], "area": 1760.5900000000001, "iscrowd": 0, "image_id": 0, "bbox": [945.4, 76.2, 37.7, 46.7], "category_id": 1, "id": 4, "ignore": 0},
{"segmentation": [[597.0, 121.9, 603.2, 121.7, 600.3, 125.3, 598.1, 127.7, 602.5, 127.5]], "area": 195.35999999999999, "iscrowd": 0, "image_id": 0, "bbox": [593.3, 115.6, 13.2, 14.8], "category_id": 1, "id": 5, "ignore": 0},
{"segmentation": [[756.2, 115.3, 762.5, 115.1, 759.5, 118.2, 757.2, 121.5, 762.1, 121.3]], "area": 221.1, "iscrowd": 0, "image_id": 0, "bbox": [752.5, 108.7, 13.4, 16.5], "category_id": 1, "id": 6, "ignore": 0},
{"segmentation": [[358.1, 117.4, 372.8, 116.2, 370.0, 123.7, 361.8, 134.2, 373.6, 133.4]], "area": 1874.5200000000002, "iscrowd": 0, "image_id": 0, "bbox": [340.3, 96.7, 38.1, 49.2], "category_id": 1, "id": 7, "ignore": 0},
{"segmentation": [[379.5, 129.0, 389.0, 129.0, 384.7, 133.6, 380.9, 138.7, 387.5, 138.7]], "area": 552.16, "iscrowd": 0, "image_id": 0, "bbox": [373.1, 116.6, 20.3, 27.2], "category_id": 1, "id": 8, "ignore": 0},
{"segmentation": [[709.0, 126.4, 713.6, 126.1, 711.4, 128.5, 709.7, 130.8, 713.6, 130.6]], "area": 124.23, "iscrowd": 0, "image_id": 0, "bbox": [706.5, 121.5, 10.1, 12.3], "category_id": 1, "id": 9, "ignore": 0},
{"segmentation": [[572.8, 126.0, 581.4, 126.0, 576.7, 131.5, 574.6, 134.5, 580.3, 134.4]], "area": 371.05, "iscrowd": 0, "image_id": 0, "bbox": [569.0, 116.7, 18.1, 20.5], "category_id": 1, "id": 10, "ignore": 0},
{"segmentation": [[931.5, 271.4, 943.9, 269.6, 936.0, 278.6, 935.2, 286.0, 944.5, 284.4]], "area": 1200.3700000000001, "iscrowd": 0, "image_id": 1, "bbox": [927.0, 255.3, 30.7, 39.1], "category_id": 1, "id": 11, "ignore": 0},
{"segmentation": [[473.7, 339.6, 481.5, 339.6, 474.1, 345.6, 475.3, 351.7, 481.4, 351.4]], "area": 818.4, "iscrowd": 0, "image_id": 1, "bbox": [471.2, 326.5, 24.8, 33.0], "category_id": 1, "id": 12, "ignore": 0},
{"segmentation": [[28.5, 326.5, 34.1, 323.5, 31.7, 333.3, 39.8, 337.5, 42.5, 334.7]], "area": 912.0300000000001, "iscrowd": 0, "image_id": 1, "bbox": [23.9, 310.9, 30.1, 30.3], "category_id": 1, "id": 13, "ignore": 0},
{"segmentation": [[413.6, 314.8, 418.6, 315.9, 412.2, 319.4, 412.7, 324.3, 416.4, 324.8]], "area": 477.52, "iscrowd": 0, "image_id": 1, "bbox": [410.3, 305.7, 18.8, 25.4], "category_id": 1, "id": 14, "ignore": 0},
{"segmentation": [[599.5, 263.3, 614.7, 263.0, 603.4, 272.6, 601.1, 283.4, 612.3, 283.2]], "area": 1978.4599999999998, "iscrowd": 0, "image_id": 1, "bbox": [594.8, 242.8, 37.4, 52.9], "category_id": 1, "id": 15, "ignore": 0},
{"segmentation": [[828.2, 290.6, 839.6, 290.0, 832.2, 297.0, 830.0, 303.8, 838.8, 303.3]], "area": 865.8, "iscrowd": 0, "image_id": 1, "bbox": [824.6, 278.5, 26.0, 33.3], "category_id": 1, "id": 16, "ignore": 0},
...
{"segmentation": [[738.1, 329.4, 747.3, 329.3, 741.2, 334.4, 738.8, 340.2, 746.2, 340.1]], "area": 607.7599999999999, "iscrowd": 0, "image_id": 1, "bbox": [734.6, 319.3, 21.4, 28.4], "category_id": 1, "id": 17, "ignore": 0},
{"segmentation": [[876.7, 210.8, 877.4, 211.2, 874.3, 218.4, 878.8, 225.6, 879.2, 225.7]], "area": 847.5, "iscrowd": 0, "image_id": 12862, "bbox": [872.6, 197.0, 22.6, 37.5], "category_id": 1, "id": 113613, "ignore": 0}
],
"categories":
[
{"name": "background", "id": 0},
{"name": "face", "id": 1}
]
}
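
If you do not want to open the whole file in an editor, a few lines of Python are enough to print a sample like the excerpt above. A minimal sketch, assuming trainset.json sits in the current directory:

import json

with open('trainset.json') as f:
    dataset = json.load(f)

print(len(dataset['images']), 'images,', len(dataset['annotations']), 'annotations')
for img in dataset['images'][:3]:
    print(img)
for ann in dataset['annotations'][:3]:
    print(ann)
print(dataset['categories'])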

This is the typical COCO data structure. I largely followed the original cocoapi, but made a few significant changes, mainly:

  1. The way annotations are displayed has changed: this time they are simply drawn as polygons (they could also be shown as individual points or numbered points; see the sketch after this list).
  2. Masks (RLE) are no longer needed, so there is no maskUtils and nothing to compile and install; plain Python is enough.
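
For example, the five landmark points in each segmentation entry could be drawn as numbered scatter points instead of a polygon. A minimal sketch; the helper name draw_landmark_points is my own, and it assumes the flat [x1, y1, ..., x5, y5] layout seen in the trainset.json excerpt above:

import matplotlib.pyplot as plt
import numpy as np

def draw_landmark_points(anns, ax=None):
    # Hypothetical alternative to the polygon display: draw each annotation's
    # five landmarks as numbered points on the current axes.
    if ax is None:
        ax = plt.gca()
    for ann in anns:
        for seg in ann['segmentation']:
            pts = np.array(seg).reshape(-1, 2)          # [[x1, y1], ..., [x5, y5]]
            ax.scatter(pts[:, 0], pts[:, 1], s=12, c='lime')
            for i, (x, y) in enumerate(pts):
                ax.annotate(str(i), (x, y), color='yellow', fontsize=8)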

The source code is split into two files. The first is the main script; give it any name you like, say unknown.py:

# @MxTan from SpaceVision SZ Co.Ltd
#
# @brief display landmark annotations image by image
#
# ref. the Windows port of cocoapi if you need a mask version:
# https://github.com/philferriere/cocoapi
#
from CoLandMark import LandMark
import numpy as np
import skimage.io as io   # conda install scikit-image
import json
import os
import matplotlib as mpl
mpl.use('TkAgg')          # interactive backend so the annotated images can be shown
import pylab

pylab.rcParams['figure.figsize'] = (8.0, 10.0)

dataDir = 'D:/vsAI/libfacedetectiontrain/data/widerface/WIDER_train/images'
annFile = 'trainset.json'

# initialize the COCO-style api for instance annotations
coco = LandMark(annFile)

# display the categories defined in the annotation file
catIds = coco.getCatIds()
cats = coco.loadCats(catIds)
nms = [cat['name'] for cat in cats]
print('COCO format categories: \n{}\n'.format(' '.join(nms)))

# iterate over all images and draw their annotations
imgIds = coco.getImgIds()
for img_id in imgIds:
    mpl.pyplot.clf()      # put a breakpoint here; each iteration shows one annotated image
    annIds = coco.getAnnIds([img_id], catIds=catIds, iscrowd=None)
    anns = coco.loadAnns(annIds)
    img = coco.loadImgs([img_id])[0]

    # ----- optionally save this image's annotations to a separate file -----
    # file_name_ext = './WIDER_train/images/' + img['file_name']
    # (filename, extension) = os.path.splitext(file_name_ext)
    # file_path = "coco/" + filename + ".json"
    # data = {"annotations": anns}
    # with open(file_path, 'w') as result_file:
    #     json.dump(data, result_file)

    # ----- display the image -----
    file_path = '{}/{}'.format(dataDir, img['file_name'])
    I = io.imread(file_path)
    # NOTE: equivalent to I = io.imread('%s/%s' % (dataDir, img['file_name']))
    mpl.pyplot.imshow(I)
    mpl.pyplot.axis('off')
    coco.showAnns(anns)
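
If you prefer not to rely on a debugger breakpoint to pause on each image, the end of the loop can block until a key or mouse button is pressed. A minimal sketch; show_and_wait is a hypothetical helper of mine, not part of the script above, and it assumes an interactive backend such as TkAgg is active:

import matplotlib.pyplot as plt
import skimage.io as io

def show_and_wait(coco, img, anns, data_dir):
    # Display one annotated image and block until a key or mouse button is pressed,
    # then return so the caller can move on to the next image.
    plt.clf()
    plt.imshow(io.imread('{}/{}'.format(data_dir, img['file_name'])))
    plt.axis('off')
    plt.title(img['file_name'])
    coco.showAnns(anns)
    plt.draw()
    plt.waitforbuttonpress()

Called inside the loop as show_and_wait(coco, img, anns, dataDir), it replaces the last three lines of the loop body.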

The annotation helper file is named CoLandMark.py (it was originally coco.py), and the class inside is renamed to LandMark so that it does not clash with the original COCO class if both are used at the same time.
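
So, if pycocotools is also installed, both APIs can coexist in one script. A small sketch; the instances_val2017.json path is just a placeholder:

from pycocotools.coco import COCO      # the original COCO api (requires pycocotools)
from CoLandMark import LandMark        # the landmark-only variant listed below

coco = COCO('instances_val2017.json')  # placeholder: any COCO-style annotation file
faces = LandMark('trainset.json')      # WIDER face landmark annotations

The full contents of CoLandMark.py follow.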

__author__ = 'tylin'
__version__ = '2.0'
# A copy of CocoApi, with some modifications to cope with landmark display.
#
# An alternative library for landmark points (no RLE code is required, so the mask part was removed).
# The following API functions are defined:
#  LandMark   - class that loads a COCO-format annotation file and prepares data structures.
#  getAnnIds  - Get ann ids that satisfy given filter conditions.
#  getCatIds  - Get cat ids that satisfy given filter conditions.
#  getImgIds  - Get img ids that satisfy given filter conditions.
#  loadAnns   - Load anns with the specified ids.
#  loadCats   - Load cats with the specified ids.
#  loadImgs   - Load imgs with the specified ids.
#  showAnns   - Display the specified annotations.
#  loadRes    - Load algorithm results and create API for accessing them.
#  download   - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each function can be accessed by: "help COCO>function".
# See also COCO>getAnnIds, COCO>getCatIds, COCO>getImgIds,
#          COCO>loadAnns, COCO>loadCats, COCO>loadImgs, COCO>showAnns
# Microsoft COCO Toolbox.      version 2.0
# Data, paper, and tutorials available at:  http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import numpy as np
import copy
import itertools
import matplotlib
# do not force a backend here; let the calling script choose one (e.g. TkAgg in unknown.py)
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import os
from collections import defaultdict
import sys

PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
    from urllib import urlretrieve
elif PYTHON_VERSION == 3:
    from urllib.request import urlretrieve


def _isArrayLike(obj):
    return hasattr(obj, '__iter__') and hasattr(obj, '__len__')


class LandMark:
    def __init__(self, annotation_file=None):
        """
        Constructor of Microsoft COCO helper class for reading and visualizing annotations.
        :param annotation_file (str): location of annotation file
        :param image_folder (str): location to the folder that hosts images.
        :return:
        """
        # load dataset
        self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(), dict()
        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
        if annotation_file is not None:
            print('loading annotations into memory...')
            tic = time.time()
            dataset = json.load(open(annotation_file, 'r'))
            assert type(dataset) == dict, 'annotation file format {} not supported'.format(type(dataset))
            print('Done (t={:0.2f}s)'.format(time.time() - tic))
            self.dataset = dataset
            self.createIndex()

    def createIndex(self):
        # create index
        print('creating index...')
        anns, cats, imgs = {}, {}, {}
        imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
        if 'annotations' in self.dataset:
            for ann in self.dataset['annotations']:
                imgToAnns[ann['image_id']].append(ann)
                anns[ann['id']] = ann
        if 'images' in self.dataset:
            for img in self.dataset['images']:
                imgs[img['id']] = img
        if 'categories' in self.dataset:
            for cat in self.dataset['categories']:
                cats[cat['id']] = cat
        if 'annotations' in self.dataset and 'categories' in self.dataset:
            for ann in self.dataset['annotations']:
                catToImgs[ann['category_id']].append(ann['image_id'])
        print('index created!')
        # create class members
        self.anns = anns
        self.imgToAnns = imgToAnns
        self.catToImgs = catToImgs
        self.imgs = imgs
        self.cats = cats

    def info(self):
        """
        Print information about the annotation file.
        :return:
        """
        for key, value in self.dataset['info'].items():
            print('{}: {}'.format(key, value))

    def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
        """
        Get ann ids that satisfy given filter conditions. default skips that filter
        :param imgIds  (int array)   : get anns for given imgs
               catIds  (int array)   : get anns for given cats
               areaRng (float array) : get anns for given area range (e.g. [0 inf])
               iscrowd (boolean)     : get anns for given crowd label (False or True)
        :return: ids (int array)     : integer array of ann ids
        """
        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
        catIds = catIds if _isArrayLike(catIds) else [catIds]
        if len(imgIds) == len(catIds) == len(areaRng) == 0:
            anns = self.dataset['annotations']
        else:
            if not len(imgIds) == 0:
                lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
                anns = list(itertools.chain.from_iterable(lists))
            else:
                anns = self.dataset['annotations']
            anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
            anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
        if iscrowd is not None:
            ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
        else:
            ids = [ann['id'] for ann in anns]
        return ids

    def getCatIds(self, catNms=[], supNms=[], catIds=[]):
        """
        filtering parameters. default skips that filter.
        :param catNms (str array) : get cats for given cat names
        :param supNms (str array) : get cats for given supercategory names
        :param catIds (int array) : get cats for given cat ids
        :return: ids (int array)  : integer array of cat ids
        """
        catNms = catNms if _isArrayLike(catNms) else [catNms]
        supNms = supNms if _isArrayLike(supNms) else [supNms]
        catIds = catIds if _isArrayLike(catIds) else [catIds]
        if len(catNms) == len(supNms) == len(catIds) == 0:
            cats = self.dataset['categories']
        else:
            cats = self.dataset['categories']
            cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
            cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
            cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
        ids = [cat['id'] for cat in cats]
        return ids

    def getImgIds(self, imgIds=[], catIds=[]):
        '''
        Get img ids that satisfy given filter conditions.
        :param imgIds (int array) : get imgs for given ids
        :param catIds (int array) : get imgs with all given cats
        :return: ids (int array)  : integer array of img ids
        '''
        imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
        catIds = catIds if _isArrayLike(catIds) else [catIds]
        if len(imgIds) == len(catIds) == 0:
            ids = self.imgs.keys()
        else:
            ids = set(imgIds)
            for i, catId in enumerate(catIds):
                if i == 0 and len(ids) == 0:
                    ids = set(self.catToImgs[catId])
                else:
                    ids &= set(self.catToImgs[catId])
        return list(ids)

    def loadAnns(self, ids=[]):
        """
        Load anns with the specified ids.
        :param ids (int array)       : integer ids specifying anns
        :return: anns (object array) : loaded ann objects
        """
        if _isArrayLike(ids):
            return [self.anns[id] for id in ids]
        elif type(ids) == int:
            return [self.anns[ids]]

    def loadCats(self, ids=[]):
        """
        Load cats with the specified ids.
        :param ids (int array)       : integer ids specifying cats
        :return: cats (object array) : loaded cat objects
        """
        if _isArrayLike(ids):
            return [self.cats[id] for id in ids]
        elif type(ids) == int:
            return [self.cats[ids]]

    def loadImgs(self, ids=[]):
        """
        Load imgs with the specified ids.
        :param ids (int array)       : integer ids specifying imgs
        :return: imgs (object array) : loaded img objects
        """
        if _isArrayLike(ids):
            return [self.imgs[id] for id in ids]
        elif type(ids) == int:
            return [self.imgs[ids]]

    def showAnns(self, anns):
        """
        Display the specified annotations.
        :param anns (array of object): annotations to display
        :return: None
        """
        if len(anns) == 0:
            return 0
        if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
            datasetType = 'instances'
        elif 'caption' in anns[0]:
            datasetType = 'captions'
        else:
            raise Exception('datasetType not supported')
        if datasetType == 'instances':
            # plt.clf()  # clear the figure
            # plt.cla()  # clear the axes
            ax = plt.gca()
            ax.set_autoscale_on(False)
            polygons = []
            color = []
            for ann in anns:
                c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
                if 'segmentation' in ann:
                    if type(ann['segmentation']) == list:
                        # polygon
                        for seg in ann['segmentation']:
                            poly = np.array(seg).reshape((int(len(seg) / 2), 2))
                            polygons.append(Polygon(poly))
                            color.append(c)
                    # else:
                    #     # mask (RLE) branch from the original cocoapi, not needed here
                    #     t = self.imgs[ann['image_id']]
                    #     if type(ann['segmentation']['counts']) == list:
                    #         rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
                    #     else:
                    #         rle = [ann['segmentation']]
                    #     m = maskUtils.decode(rle)
                    #     img = np.ones((m.shape[0], m.shape[1], 3))
                    #     if ann['iscrowd'] == 1:
                    #         color_mask = np.array([2.0, 166.0, 101.0]) / 255
                    #     if ann['iscrowd'] == 0:
                    #         color_mask = np.random.random((1, 3)).tolist()[0]
                    #     for i in range(3):
                    #         img[:, :, i] = color_mask[i]
                    #     ax.imshow(np.dstack((img, m * 0.5)))
                if 'keypoints' in ann and type(ann['keypoints']) == list:
                    # turn skeleton into zero-based index
                    sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton']) - 1
                    kp = np.array(ann['keypoints'])
                    x = kp[0::3]
                    y = kp[1::3]
                    v = kp[2::3]
                    for sk in sks:
                        if np.all(v[sk] > 0):
                            plt.plot(x[sk], y[sk], linewidth=3, color=c)
                    plt.plot(x[v > 0], y[v > 0], 'o', markersize=8, markerfacecolor=c, markeredgecolor='k', markeredgewidth=2)
                    plt.plot(x[v > 1], y[v > 1], 'o', markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
            # p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
            # ax.add_collection(p)
            p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
            ax.add_collection(p)
        elif datasetType == 'captions':
            for ann in anns:
                print(ann['caption'])

    def loadRes(self, resFile):
        """
        Load result file and return a result api object.
        :param   resFile (str) : file name of result file
        :return: res (obj)     : result api object
        """
        res = LandMark()
        res.dataset['images'] = [img for img in self.dataset['images']]
        print('Loading and preparing results...')
        tic = time.time()
        # Check result type in a way compatible with Python 2 and 3.
        if PYTHON_VERSION == 2:
            is_string = isinstance(resFile, basestring)  # Python 2
        elif PYTHON_VERSION == 3:
            is_string = isinstance(resFile, str)  # Python 3
        if is_string:
            anns = json.load(open(resFile))
        elif type(resFile) == np.ndarray:
            anns = self.loadNumpyAnnotations(resFile)
        else:
            anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
        annsImgIds = [ann['image_id'] for ann in anns]
        assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
            'Results do not correspond to current coco set'
        if 'caption' in anns[0]:
            imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
            res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
            for id, ann in enumerate(anns):
                ann['id'] = id + 1
        elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                bb = ann['bbox']
                x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
                if not 'segmentation' in ann:
                    ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
                ann['area'] = bb[2] * bb[3]
                ann['id'] = id + 1
                ann['iscrowd'] = 0
        elif 'segmentation' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                # the original cocoapi only supports compressed RLE segmentation results here;
                # the mask code has been removed together with maskUtils
                # ann['area'] = maskUtils.area(ann['segmentation'])
                # if not 'bbox' in ann:
                #     ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
                ann['id'] = id + 1
                ann['iscrowd'] = 0
        elif 'keypoints' in anns[0]:
            res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
            for id, ann in enumerate(anns):
                s = ann['keypoints']
                x = s[0::3]
                y = s[1::3]
                x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
                ann['area'] = (x1 - x0) * (y1 - y0)
                ann['id'] = id + 1
                ann['bbox'] = [x0, y0, x1 - x0, y1 - y0]
        print('DONE (t={:0.2f}s)'.format(time.time() - tic))
        res.dataset['annotations'] = anns
        res.createIndex()
        return res

    def download(self, tarDir=None, imgIds=[]):
        '''
        Download COCO images from mscoco.org server.
        :param tarDir (str)  : COCO results directory name
               imgIds (list) : images to be downloaded
        :return:
        '''
        if tarDir is None:
            print('Please specify target directory')
            return -1
        if len(imgIds) == 0:
            imgs = self.imgs.values()
        else:
            imgs = self.loadImgs(imgIds)
        N = len(imgs)
        if not os.path.exists(tarDir):
            os.makedirs(tarDir)
        for i, img in enumerate(imgs):
            tic = time.time()
            fname = os.path.join(tarDir, img['file_name'])
            if not os.path.exists(fname):
                urlretrieve(img['coco_url'], fname)
            print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time() - tic))

    def loadNumpyAnnotations(self, data):
        """
        Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
        :param  data (numpy.ndarray)
        :return: annotations (python nested list)
        """
        print('Converting ndarray to lists...')
        assert(type(data) == np.ndarray)
        print(data.shape)
        assert(data.shape[1] == 7)
        N = data.shape[0]
        ann = []
        for i in range(N):
            if i % 1000000 == 0:
                print('{}/{}'.format(i, N))
            ann += [{
                'image_id': int(data[i, 0]),
                'bbox': [data[i, 1], data[i, 2], data[i, 3], data[i, 4]],
                'score': data[i, 5],
                'category_id': int(data[i, 6]),
            }]
        return ann

    # def annToRLE(self, ann):
    #     """
    #     Convert annotation which can be polygons, uncompressed RLE to RLE.
    #     :return: binary mask (numpy 2D array)
    #     """
    #     t = self.imgs[ann['image_id']]
    #     h, w = t['height'], t['width']
    #     segm = ann['segmentation']
    #     if type(segm) == list:
    #         # polygon -- a single object might consist of multiple parts
    #         # we merge all parts into one mask rle code
    #         rles = maskUtils.frPyObjects(segm, h, w)
    #         rle = maskUtils.merge(rles)
    #     elif type(segm['counts']) == list:
    #         # uncompressed RLE
    #         rle = maskUtils.frPyObjects(segm, h, w)
    #     else:
    #         # rle
    #         rle = ann['segmentation']
    #     return rle

    # def annToMask(self, ann):
    #     """
    #     Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
    #     :return: binary mask (numpy 2D array)
    #     """
    #     rle = self.annToRLE(ann)
    #     m = maskUtils.decode(rle)
    #     return m

I have not deleted the commented-out parts, so you can compare them against the original cocoapi.
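
As a quick sanity check on annotation quality, you can also verify that every landmark point falls inside its bounding box. A minimal sketch; check_landmarks_in_bbox is an illustrative helper of mine, not part of the files above, and it assumes the bbox/segmentation layout shown in the trainset.json excerpt:

import json
import numpy as np

def check_landmarks_in_bbox(ann_file='trainset.json', margin=0.0):
    # Count annotations whose landmark points fall outside their own
    # bbox (x, y, w, h) by more than `margin` pixels.
    dataset = json.load(open(ann_file))
    bad = 0
    for ann in dataset['annotations']:
        x, y, w, h = ann['bbox']
        pts = np.array(ann['segmentation'][0]).reshape(-1, 2)
        inside = (pts[:, 0] >= x - margin) & (pts[:, 0] <= x + w + margin) & \
                 (pts[:, 1] >= y - margin) & (pts[:, 1] <= y + h + margin)
        if not inside.all():
            bad += 1
    print('{} / {} annotations have landmarks outside their bbox'.format(bad, len(dataset['annotations'])))

check_landmarks_in_bbox()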
