autocrop.py 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345
  1. import cv2
  2. import requests
  3. import os
  4. import numpy as np
  5. from PIL import ImageDraw
  6. from modules import paths_internal
  7. from pkg_resources import parse_version
# Annotation colors (PIL color strings) used when settings.annotate_image is on.
GREEN = "#0F0"  # final crop rectangle and the averaged focal point
BLUE = "#00F"   # corner/edge centroid
RED = "#F00"    # face centroid
  11. def crop_image(im, settings):
  12. """ Intelligently crop an image to the subject matter """
  13. scale_by = 1
  14. if is_landscape(im.width, im.height):
  15. scale_by = settings.crop_height / im.height
  16. elif is_portrait(im.width, im.height):
  17. scale_by = settings.crop_width / im.width
  18. elif is_square(im.width, im.height):
  19. if is_square(settings.crop_width, settings.crop_height):
  20. scale_by = settings.crop_width / im.width
  21. elif is_landscape(settings.crop_width, settings.crop_height):
  22. scale_by = settings.crop_width / im.width
  23. elif is_portrait(settings.crop_width, settings.crop_height):
  24. scale_by = settings.crop_height / im.height
  25. im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
  26. im_debug = im.copy()
  27. focus = focal_point(im_debug, settings)
  28. # take the focal point and turn it into crop coordinates that try to center over the focal
  29. # point but then get adjusted back into the frame
  30. y_half = int(settings.crop_height / 2)
  31. x_half = int(settings.crop_width / 2)
  32. x1 = focus.x - x_half
  33. if x1 < 0:
  34. x1 = 0
  35. elif x1 + settings.crop_width > im.width:
  36. x1 = im.width - settings.crop_width
  37. y1 = focus.y - y_half
  38. if y1 < 0:
  39. y1 = 0
  40. elif y1 + settings.crop_height > im.height:
  41. y1 = im.height - settings.crop_height
  42. x2 = x1 + settings.crop_width
  43. y2 = y1 + settings.crop_height
  44. crop = [x1, y1, x2, y2]
  45. results = []
  46. results.append(im.crop(tuple(crop)))
  47. if settings.annotate_image:
  48. d = ImageDraw.Draw(im_debug)
  49. rect = list(crop)
  50. rect[2] -= 1
  51. rect[3] -= 1
  52. d.rectangle(rect, outline=GREEN)
  53. results.append(im_debug)
  54. if settings.desktop_view_image:
  55. im_debug.show()
  56. return results
  57. def focal_point(im, settings):
  58. corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
  59. entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
  60. face_points = image_face_points(im, settings) if settings.face_points_weight > 0 else []
  61. pois = []
  62. weight_pref_total = 0
  63. if corner_points:
  64. weight_pref_total += settings.corner_points_weight
  65. if entropy_points:
  66. weight_pref_total += settings.entropy_points_weight
  67. if face_points:
  68. weight_pref_total += settings.face_points_weight
  69. corner_centroid = None
  70. if corner_points:
  71. corner_centroid = centroid(corner_points)
  72. corner_centroid.weight = settings.corner_points_weight / weight_pref_total
  73. pois.append(corner_centroid)
  74. entropy_centroid = None
  75. if entropy_points:
  76. entropy_centroid = centroid(entropy_points)
  77. entropy_centroid.weight = settings.entropy_points_weight / weight_pref_total
  78. pois.append(entropy_centroid)
  79. face_centroid = None
  80. if face_points:
  81. face_centroid = centroid(face_points)
  82. face_centroid.weight = settings.face_points_weight / weight_pref_total
  83. pois.append(face_centroid)
  84. average_point = poi_average(pois, settings)
  85. if settings.annotate_image:
  86. d = ImageDraw.Draw(im)
  87. max_size = min(im.width, im.height) * 0.07
  88. if corner_centroid is not None:
  89. color = BLUE
  90. box = corner_centroid.bounding(max_size * corner_centroid.weight)
  91. d.text((box[0], box[1] - 15), f"Edge: {corner_centroid.weight:.02f}", fill=color)
  92. d.ellipse(box, outline=color)
  93. if len(corner_points) > 1:
  94. for f in corner_points:
  95. d.rectangle(f.bounding(4), outline=color)
  96. if entropy_centroid is not None:
  97. color = "#ff0"
  98. box = entropy_centroid.bounding(max_size * entropy_centroid.weight)
  99. d.text((box[0], box[1] - 15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color)
  100. d.ellipse(box, outline=color)
  101. if len(entropy_points) > 1:
  102. for f in entropy_points:
  103. d.rectangle(f.bounding(4), outline=color)
  104. if face_centroid is not None:
  105. color = RED
  106. box = face_centroid.bounding(max_size * face_centroid.weight)
  107. d.text((box[0], box[1] - 15), f"Face: {face_centroid.weight:.02f}", fill=color)
  108. d.ellipse(box, outline=color)
  109. if len(face_points) > 1:
  110. for f in face_points:
  111. d.rectangle(f.bounding(4), outline=color)
  112. d.ellipse(average_point.bounding(max_size), outline=GREEN)
  113. return average_point
  114. def image_face_points(im, settings):
  115. if settings.dnn_model_path is not None:
  116. detector = cv2.FaceDetectorYN.create(
  117. settings.dnn_model_path,
  118. "",
  119. (im.width, im.height),
  120. 0.9, # score threshold
  121. 0.3, # nms threshold
  122. 5000 # keep top k before nms
  123. )
  124. faces = detector.detect(np.array(im))
  125. results = []
  126. if faces[1] is not None:
  127. for face in faces[1]:
  128. x = face[0]
  129. y = face[1]
  130. w = face[2]
  131. h = face[3]
  132. results.append(
  133. PointOfInterest(
  134. int(x + (w * 0.5)), # face focus left/right is center
  135. int(y + (h * 0.33)), # face focus up/down is close to the top of the head
  136. size=w,
  137. weight=1 / len(faces[1])
  138. )
  139. )
  140. return results
  141. else:
  142. np_im = np.array(im)
  143. gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
  144. tries = [
  145. [f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01],
  146. [f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05],
  147. [f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05],
  148. [f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05],
  149. [f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05],
  150. [f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05],
  151. [f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05],
  152. [f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05]
  153. ]
  154. for t in tries:
  155. classifier = cv2.CascadeClassifier(t[0])
  156. minsize = int(min(im.width, im.height) * t[1]) # at least N percent of the smallest side
  157. try:
  158. faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
  159. minNeighbors=7, minSize=(minsize, minsize),
  160. flags=cv2.CASCADE_SCALE_IMAGE)
  161. except Exception:
  162. continue
  163. if faces:
  164. rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
  165. return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0] - r[2]),
  166. weight=1 / len(rects)) for r in rects]
  167. return []
  168. def image_corner_points(im, settings):
  169. grayscale = im.convert("L")
  170. # naive attempt at preventing focal points from collecting at watermarks near the bottom
  171. gd = ImageDraw.Draw(grayscale)
  172. gd.rectangle([0, im.height * .9, im.width, im.height], fill="#999")
  173. np_im = np.array(grayscale)
  174. points = cv2.goodFeaturesToTrack(
  175. np_im,
  176. maxCorners=100,
  177. qualityLevel=0.04,
  178. minDistance=min(grayscale.width, grayscale.height) * 0.06,
  179. useHarrisDetector=False,
  180. )
  181. if points is None:
  182. return []
  183. focal_points = []
  184. for point in points:
  185. x, y = point.ravel()
  186. focal_points.append(PointOfInterest(x, y, size=4, weight=1 / len(points)))
  187. return focal_points
  188. def image_entropy_points(im, settings):
  189. landscape = im.height < im.width
  190. portrait = im.height > im.width
  191. if landscape:
  192. move_idx = [0, 2]
  193. move_max = im.size[0]
  194. elif portrait:
  195. move_idx = [1, 3]
  196. move_max = im.size[1]
  197. else:
  198. return []
  199. e_max = 0
  200. crop_current = [0, 0, settings.crop_width, settings.crop_height]
  201. crop_best = crop_current
  202. while crop_current[move_idx[1]] < move_max:
  203. crop = im.crop(tuple(crop_current))
  204. e = image_entropy(crop)
  205. if (e > e_max):
  206. e_max = e
  207. crop_best = list(crop_current)
  208. crop_current[move_idx[0]] += 4
  209. crop_current[move_idx[1]] += 4
  210. x_mid = int(crop_best[0] + settings.crop_width / 2)
  211. y_mid = int(crop_best[1] + settings.crop_height / 2)
  212. return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
  213. def image_entropy(im):
  214. # greyscale image entropy
  215. # band = np.asarray(im.convert("L"))
  216. band = np.asarray(im.convert("1"), dtype=np.uint8)
  217. hist, _ = np.histogram(band, bins=range(0, 256))
  218. hist = hist[hist > 0]
  219. return -np.log2(hist / hist.sum()).sum()
  220. def centroid(pois):
  221. x = [poi.x for poi in pois]
  222. y = [poi.y for poi in pois]
  223. return PointOfInterest(sum(x) / len(pois), sum(y) / len(pois))
  224. def poi_average(pois, settings):
  225. weight = 0.0
  226. x = 0.0
  227. y = 0.0
  228. for poi in pois:
  229. weight += poi.weight
  230. x += poi.x * poi.weight
  231. y += poi.y * poi.weight
  232. avg_x = round(weight and x / weight)
  233. avg_y = round(weight and y / weight)
  234. return PointOfInterest(avg_x, avg_y)
  235. def is_landscape(w, h):
  236. return w > h
  237. def is_portrait(w, h):
  238. return h > w
  239. def is_square(w, h):
  240. return w == h
# Cache directory for the OpenCV face-detection model files.
model_dir_opencv = os.path.join(paths_internal.models_path, 'opencv')
# Select the YuNet model matching the installed OpenCV: the 2023 model file is
# used on OpenCV >= 4.8, the 2022 one otherwise (per the version gate below).
if parse_version(cv2.__version__) >= parse_version('4.8'):
    model_file_path = os.path.join(model_dir_opencv, 'face_detection_yunet_2023mar.onnx')
    model_url = 'https://github.com/opencv/opencv_zoo/blob/b6e370b10f641879a87890d44e42173077154a05/models/face_detection_yunet/face_detection_yunet_2023mar.onnx?raw=true'
else:
    model_file_path = os.path.join(model_dir_opencv, 'face_detection_yunet.onnx')
    model_url = 'https://github.com/opencv/opencv_zoo/blob/91fb0290f50896f38a0ab1e558b74b16bc009428/models/face_detection_yunet/face_detection_yunet_2022mar.onnx?raw=true'
  248. def download_and_cache_models():
  249. if not os.path.exists(model_file_path):
  250. os.makedirs(model_dir_opencv, exist_ok=True)
  251. print(f"downloading face detection model from '{model_url}' to '{model_file_path}'")
  252. response = requests.get(model_url)
  253. with open(model_file_path, "wb") as f:
  254. f.write(response.content)
  255. return model_file_path
  256. class PointOfInterest:
  257. def __init__(self, x, y, weight=1.0, size=10):
  258. self.x = x
  259. self.y = y
  260. self.weight = weight
  261. self.size = size
  262. def bounding(self, size):
  263. return [
  264. self.x - size // 2,
  265. self.y - size // 2,
  266. self.x + size // 2,
  267. self.y + size // 2
  268. ]
  269. class Settings:
  270. def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False, dnn_model_path=None):
  271. self.crop_width = crop_width
  272. self.crop_height = crop_height
  273. self.corner_points_weight = corner_points_weight
  274. self.entropy_points_weight = entropy_points_weight
  275. self.face_points_weight = face_points_weight
  276. self.annotate_image = annotate_image
  277. self.desktop_view_image = False
  278. self.dnn_model_path = dnn_model_path