# gridextractor.py
import numpy as np
import cv2
import operator


def pre_process_image(img, skip_dilate=False, do_erode=False, kernel_digit=5):
    """Uses a blurring function, adaptive thresholding and dilation to expose the main features of an image."""
    # Gaussian blur with a kernel size (height, width) of 9.
    # Note that kernel sizes must be positive and odd, and the kernel must be square.
    proc = cv2.GaussianBlur(img.copy(), (9, 9), 0)
    # Adaptive threshold using 11 nearest-neighbour pixels.
    proc = cv2.adaptiveThreshold(proc, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    # Invert colours, so gridlines have non-zero pixel values.
    # Necessary before dilating the image, otherwise dilation will behave like erosion.
    proc = cv2.bitwise_not(proc, proc)
    if not skip_dilate:
        # Dilate the image to increase the size of the grid lines.
        kernel = np.array([[0., 1., 0.], [1., 1., 1.], [0., 1., 0.]], np.uint8)
        proc = cv2.dilate(proc, kernel)
    # Optional erosion; in practice it makes little difference.
    # Note that the erosion is applied to the original input `img`, not to `proc`.
    if do_erode:
        kernel = np.ones((kernel_digit, kernel_digit), np.uint8)
        proc = cv2.erode(img, kernel, iterations=1)
    return proc


def show_image(img):
    """Shows an image until any key is pressed."""
    cv2.imshow('image', img)  # Display the image
    cv2.waitKey(0)  # Wait for any key to be pressed (with the image window active)
    cv2.destroyAllWindows()  # Close all windows


def find_corners_of_largest_polygon(img):
    """Finds the 4 extreme corners of the largest contour in the image."""
    contours, _ = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)  # Find contours
    # Assuming the Sudoku grid is clearly visible and one of the biggest elements of the photo,
    # it will cover the largest contour area.
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    polygon = contours[0]  # Largest contour
    # Using `operator.itemgetter` with `max` and `min` gives us the index of the point.
    # Each point is an array of one coordinate, hence the [0] getter, then [0] or [1] to get x and y respectively.
    # Bottom-right point has the largest (x + y) value.
    # Top-left point has the smallest (x + y) value.
    # Bottom-left point has the smallest (x - y) value.
    # Top-right point has the largest (x - y) value.
    bottom_right, _ = max(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
    top_left, _ = min(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
    bottom_left, _ = min(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
    top_right, _ = max(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
    # Return an array of the 4 corner points of the polygon.
    return [polygon[top_left][0], polygon[top_right][0], polygon[bottom_right][0], polygon[bottom_left][0]]


def display_lines(in_img, points, radius=5, colour=(0, 0, 255)):
    """Draws the outline of the identified Sudoku puzzle. Note that `radius` is currently unused."""
    img = in_img.copy()
    # `points` is the output of `find_corners_of_largest_polygon`:
    # [top_left, top_right, bottom_right, bottom_left]
    # Dynamically change to a colour image if necessary.
    if len(colour) == 3:
        if len(img.shape) == 2:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        elif img.shape[2] == 1:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    top_left = tuple(int(x) for x in points[0])
    top_right = tuple(int(x) for x in points[1])
    bottom_right = tuple(int(x) for x in points[2])
    bottom_left = tuple(int(x) for x in points[3])
    cv2.line(img, top_left, top_right, color=colour, thickness=4, lineType=8, shift=0)
    cv2.line(img, top_left, bottom_left, color=colour, thickness=4, lineType=8, shift=0)
    cv2.line(img, bottom_right, bottom_left, color=colour, thickness=4, lineType=8, shift=0)
    cv2.line(img, bottom_right, top_right, color=colour, thickness=4, lineType=8, shift=0)
    show_image(img)
    return img


def distance_between(p1, p2):
    """Returns the scalar distance between two points."""
    a = p2[0] - p1[0]
    b = p2[1] - p1[1]
    return np.sqrt((a ** 2) + (b ** 2))


def crop_and_warp(img, crop_rect):
    """Crops and warps a section from an image into a square of similar size."""
    # Rectangle described by its top-left, top-right, bottom-right and bottom-left points.
    top_left, top_right, bottom_right, bottom_left = crop_rect[0], crop_rect[1], crop_rect[2], crop_rect[3]
    # Explicitly set the data type to float32, or `getPerspectiveTransform` will throw an error.
    src = np.array([top_left, top_right, bottom_right, bottom_left], dtype='float32')
    # Get the longest side of the rectangle.
    side = max([
        distance_between(bottom_right, top_right),
        distance_between(top_left, bottom_left),
        distance_between(bottom_right, bottom_left),
        distance_between(top_left, top_right)
    ])
    # Describe a square with sides of the calculated length; this is the new perspective to warp to.
    dst = np.array([[0, 0], [side - 1, 0], [side - 1, side - 1], [0, side - 1]], dtype='float32')
    # Get the transformation matrix.
    m = cv2.getPerspectiveTransform(src, dst)
    # Perform the transformation on the original image.
    return cv2.warpPerspective(img, m, (int(side), int(side)))


# Credit for the next two functions: Akash Jawar.
def scale_and_centre(img, size, margin=0, background=0):
    """Scales and centres an image onto a new background square."""
    h, w = img.shape[:2]

    def centre_pad(length):
        """Handles centering for a given length that may be odd or even."""
        if length % 2 == 0:
            side1 = int((size - length) / 2)
            side2 = side1
        else:
            side1 = int((size - length) / 2)
            side2 = side1 + 1
        return side1, side2

    def scale(r, x):
        return int(r * x)

    if h > w:
        t_pad = int(margin / 2)
        b_pad = t_pad
        ratio = (size - margin) / h
        w, h = scale(ratio, w), scale(ratio, h)
        l_pad, r_pad = centre_pad(w)
    else:
        l_pad = int(margin / 2)
        r_pad = l_pad
        ratio = (size - margin) / w
        w, h = scale(ratio, w), scale(ratio, h)
        t_pad, b_pad = centre_pad(h)
    img = cv2.resize(img, (w, h))
    img = cv2.copyMakeBorder(img, t_pad, b_pad, l_pad, r_pad, cv2.BORDER_CONSTANT, None, background)
    return cv2.resize(img, (size, size))


def find_largest_feature(inp_img, scan_tl=None, scan_br=None):
    """
    Uses the fact that `floodFill` returns a bounding box of the area it filled to find the biggest
    connected pixel structure in the image. Fills this structure in white, reducing the rest to black.
    """
    img = inp_img.copy()  # Copy the image, leaving the original untouched
    height, width = img.shape[:2]
    max_area = 0
    seed_point = (None, None)
    if scan_tl is None:
        scan_tl = [0, 0]
    if scan_br is None:
        scan_br = [width, height]
    # Loop through the image
    for x in range(scan_tl[0], scan_br[0]):
        for y in range(scan_tl[1], scan_br[1]):
            # Only operate on light or white squares
            if img.item(y, x) == 255 and x < width and y < height:  # Note that `.item()` takes its input as (y, x)
                area = cv2.floodFill(img, None, (x, y), 64)
                if area[0] > max_area:  # Keep the largest filled area, which should be the grid
                    max_area = area[0]
                    seed_point = (x, y)
    # Colour everything grey (compensates for features outside of our middle scanning range).
    for x in range(width):
        for y in range(height):
            if img.item(y, x) == 255 and x < width and y < height:
                cv2.floodFill(img, None, (x, y), 64)
    mask = np.zeros((height + 2, width + 2), np.uint8)  # Mask that is 2 pixels bigger than the image
    # Highlight the main feature
    if all([p is not None for p in seed_point]):
        cv2.floodFill(img, mask, seed_point, 255)
    top, bottom, left, right = height, 0, width, 0
    for x in range(width):
        for y in range(height):
            if img.item(y, x) == 64:  # Hide anything that isn't the main feature
                cv2.floodFill(img, mask, (x, y), 0)
            # Find the bounding parameters
            if img.item(y, x) == 255:
                top = y if y < top else top
                bottom = y if y > bottom else bottom
                left = x if x < left else left
                right = x if x > right else right
    bbox = [[left, top], [right, bottom]]
    return img, np.array(bbox, dtype='float32'), seed_point


def infer_grid(img):
    """Infers an 81-cell grid from a square image."""
    squares = []
    side = img.shape[0] / 9
    # Note that we swap j and i here so the rectangles are stored in the list reading left-right instead of top-down.
    for j in range(9):
        for i in range(9):
            p1 = (i * side, j * side)  # Top left corner of a bounding box
            p2 = ((i + 1) * side, (j + 1) * side)  # Bottom right corner of the bounding box
            squares.append((p1, p2))
    return squares


def cut_from_rect(img, rect):
    """Cuts a rectangle from an image using the top left and bottom right points."""
    return img[int(rect[0][1]):int(rect[1][1]), int(rect[0][0]):int(rect[1][0])]


def extract_digit(img, rect, size):
    """Extracts a digit (if one exists) from a Sudoku square."""
    digit = cut_from_rect(img, rect)  # Get the digit box from the whole square
    # Use flood-fill feature finding to get the largest feature in the middle of the box.
    # The margin defines an area in the middle where we would expect to find a pixel belonging to the digit.
    h, w = digit.shape[:2]
    margin = int(np.mean([h, w]) / 2.5)
    _, bbox, seed = find_largest_feature(digit, [margin, margin], [w - margin, h - margin])
    digit = cut_from_rect(digit, bbox)
    # Scale and pad the digit so that it fits the square size we're using for machine learning.
    w = bbox[1][0] - bbox[0][0]
    h = bbox[1][1] - bbox[0][1]
    # Ignore any small bounding boxes.
    if w > 0 and h > 0 and (w * h) > 100 and len(digit) > 0:
        return scale_and_centre(digit, size, 4)
    else:
        return np.zeros((size, size), np.uint8)


def get_digits(img, squares, size):
    """Extracts digits from their cells and builds an array."""
    digits = []
    img = pre_process_cropped(img)
    for square in squares:
        digits.append(extract_digit(img, square, size))
    return digits


def pre_process_cropped(img):
    """Pre-processes the cropped grid: one pass with dilation, then a second pass with erosion."""
    cropped = pre_process_image(img, skip_dilate=False, do_erode=False)
    cropped = pre_process_image(cropped, skip_dilate=True, do_erode=True, kernel_digit=4)
    return cropped
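

# Example usage sketch, not part of the original module: it strings the functions above into the full
# extraction pipeline. The input path 'sudoku.jpg' and the 28-pixel digit size are illustrative
# assumptions, not values taken from the original project.
if __name__ == '__main__':
    # Load the puzzle photo in grayscale, since every function here expects a single-channel image.
    original = cv2.imread('sudoku.jpg', cv2.IMREAD_GRAYSCALE)
    if original is None:
        raise FileNotFoundError("Could not read 'sudoku.jpg'")
    # Expose the grid lines, locate the puzzle outline, and warp it into a square.
    processed = pre_process_image(original)
    corners = find_corners_of_largest_polygon(processed)
    cropped = crop_and_warp(original, corners)
    # Split the square into 81 cells and pull a fixed-size digit image out of each one.
    squares = infer_grid(cropped)
    digits = get_digits(cropped, squares, 28)
    show_image(np.concatenate(digits[:9], axis=1))  # Quick visual check of the first row of cells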