Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
iperov
GitHub Repository: iperov/deepfacelab
Path: blob/master/core/imagelib/estimate_sharpness.py
628 views
1
"""
2
Copyright (c) 2009-2010 Arizona Board of Regents. All Rights Reserved.
3
Contact: Lina Karam ([email protected]) and Niranjan Narvekar ([email protected])
4
Image, Video, and Usabilty (IVU) Lab, http://ivulab.asu.edu , Arizona State University
5
This copyright statement may not be removed from any file containing it or from modifications to these files.
6
This copyright notice must also be included in any file or product that is derived from the source files.
7
8
Redistribution and use of this code in source and binary forms, with or without modification, are permitted provided that the
9
following conditions are met:
10
- Redistribution's of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
11
- Redistribution's in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer
12
in the documentation and/or other materials provided with the distribution.
13
- The Image, Video, and Usability Laboratory (IVU Lab, http://ivulab.asu.edu) is acknowledged in any publication that
14
reports research results using this code, copies of this code, or modifications of this code.
15
The code and our papers are to be cited in the bibliography as:
16
17
N. D. Narvekar and L. J. Karam, "CPBD Sharpness Metric Software", http://ivulab.asu.edu/Quality/CPBD
18
19
N. D. Narvekar and L. J. Karam, "A No-Reference Image Blur Metric Based on the Cumulative
20
Probability of Blur Detection (CPBD)," accepted and to appear in the IEEE Transactions on Image Processing, 2011.
21
22
N. D. Narvekar and L. J. Karam, "An Improved No-Reference Sharpness Metric Based on the Probability of Blur Detection," International Workshop on Video Processing and Quality Metrics for Consumer Electronics (VPQM), January 2010, http://www.vpqm.org (pdf)
23
24
N. D. Narvekar and L. J. Karam, "A No Reference Perceptual Quality Metric based on Cumulative Probability of Blur Detection," First International Workshop on the Quality of Multimedia Experience (QoMEX), pp. 87-91, July 2009.
25
26
DISCLAIMER:
27
This software is provided by the copyright holders and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall the Arizona Board of Regents, Arizona State University, IVU Lab members, authors or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute
28
goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage.
29
"""
30
31
import numpy as np
32
import cv2
33
from math import atan2, pi
34
35
36
def sobel(image):
    # type: (numpy.ndarray) -> numpy.ndarray
    """
    Find edges using the Sobel approximation to the derivatives.

    Inspired by the [Octave implementation](https://sourceforge.net/p/octave/image/ci/default/tree/inst/edge.m#l196).
    """
    from skimage.filters.edges import HSOBEL_WEIGHTS

    # Normalized horizontal Sobel kernel; transposed below to respond to
    # horizontal intensity changes (vertical edges).
    kernel = np.array(HSOBEL_WEIGHTS)
    kernel = kernel / np.sum(np.abs(kernel))

    from scipy.ndimage import convolve

    response = convolve(image, kernel.T)
    strength = response * response  # squared edge strength

    # Threshold choice mirrors the Octave edge() default:
    # https://sourceforge.net/p/octave/image/ci/default/tree/inst/edge.m#l59
    threshold = 2 * np.sqrt(np.mean(strength))

    # Suppress weak responses, then thin the surviving ridges.
    strength = np.where(strength > threshold, strength, 0)
    return _simple_thinning(strength)
55
56
57
def _simple_thinning(strength):
58
# type: (numpy.ndarray) -> numpy.ndarray
59
"""
60
Perform a very simple thinning.
61
62
Inspired by the [Octave implementation](https://sourceforge.net/p/octave/image/ci/default/tree/inst/edge.m#l512).
63
"""
64
num_rows, num_cols = strength.shape
65
66
zero_column = np.zeros((num_rows, 1))
67
zero_row = np.zeros((1, num_cols))
68
69
x = (
70
(strength > np.c_[zero_column, strength[:, :-1]]) &
71
(strength > np.c_[strength[:, 1:], zero_column])
72
)
73
74
y = (
75
(strength > np.r_[zero_row, strength[:-1, :]]) &
76
(strength > np.r_[strength[1:, :], zero_row])
77
)
78
79
return x | y
80
81
82
83
84
85
# threshold to characterize blocks as edge/non-edge blocks
86
THRESHOLD = 0.002
87
# fitting parameter
88
BETA = 3.6
89
# block size
90
BLOCK_HEIGHT, BLOCK_WIDTH = (64, 64)
91
# just noticeable widths based on the perceptual experiments
92
WIDTH_JNB = np.concatenate([5*np.ones(51), 3*np.ones(205)])
93
94
95
def compute(image):
    # type: (numpy.ndarray) -> float
    """Compute the sharpness metric for the given data."""
    from skimage.feature import canny

    # work in double precision for the gradient / width computations
    image = image.astype(np.float64)

    # Two edge detectors are used: Canny edges classify the 64x64 blocks
    # as edge / non-edge blocks, while Sobel edges mark the positions
    # where the edge widths are measured.
    canny_edges = canny(image)
    sobel_edges = sobel(image)

    # per-pixel edge widths (0 where there is no edge)
    widths = marziliano_method(sobel_edges, image)

    # fold the widths into the cumulative-probability-of-blur metric
    return _calculate_sharpness_metric(image, canny_edges, widths)
114
115
116
def marziliano_method(edges, image):
    # type: (numpy.ndarray, numpy.ndarray) -> numpy.ndarray
    """
    Calculate the widths of the given edges (Marziliano et al. method).

    :param edges: binary edge map; a value of 1 marks an edge pixel.
    :param image: grayscale image the edges were detected in.
    :return: A matrix with the same dimensions as the given image with 0's at
        non-edge locations and edge-widths at the edge locations.
    """

    # `edge_widths` consists of zero and non-zero values. A zero value
    # indicates that there is no edge at that position and a non-zero value
    # indicates that there is an edge at that position and the value itself
    # gives the edge width.
    edge_widths = np.zeros(image.shape)

    # find the gradient for the image (np.gradient returns row-wise
    # differences first, hence y before x)
    gradient_y, gradient_x = np.gradient(image)

    # dimensions of the image
    img_height, img_width = image.shape

    # holds the angle information of the edges, in degrees
    edge_angles = np.zeros(image.shape)

    # calculate the angle of the edges
    for row in range(img_height):
        for col in range(img_width):
            if gradient_x[row, col] != 0:
                edge_angles[row, col] = atan2(gradient_y[row, col], gradient_x[row, col]) * (180 / pi)
            elif gradient_x[row, col] == 0 and gradient_y[row, col] == 0:
                edge_angles[row, col] = 0
            # NOTE(review): this compares the gradient *value* to pi/2, not
            # the angle; a vertical gradient generally will not equal pi/2
            # exactly, so this branch rarely fires. Kept as in the upstream
            # CPBD reference implementation — confirm before changing.
            elif gradient_x[row, col] == 0 and gradient_y[row, col] == pi/2:
                edge_angles[row, col] = 90

    if np.any(edge_angles):

        # quantize the angle to the nearest multiple of 45 degrees
        quantized_angles = 45 * np.round(edge_angles / 45)

        # borders are skipped because the width search needs neighbours on
        # both sides of the edge pixel
        for row in range(1, img_height - 1):
            for col in range(1, img_width - 1):
                if edges[row, col] == 1:

                    # gradient angle = 180 or -180 (intensity decreasing
                    # left-to-right across the edge)
                    if quantized_angles[row, col] == 180 or quantized_angles[row, col] == -180:
                        # walk left from the edge (at most 100 pixels) until
                        # the intensity stops decreasing or the image ends
                        for margin in range(100 + 1):
                            inner_border = (col - 1) - margin
                            outer_border = (col - 2) - margin

                            # outside image or intensity increasing from left to right
                            if outer_border < 0 or (image[row, outer_border] - image[row, inner_border]) <= 0:
                                break

                        # `margin` keeps its value from the broken loop
                        width_left = margin + 1

                        # walk right until the intensity stops decreasing
                        for margin in range(100 + 1):
                            inner_border = (col + 1) + margin
                            outer_border = (col + 2) + margin

                            # outside image or intensity increasing from left to right
                            if outer_border >= img_width or (image[row, outer_border] - image[row, inner_border]) >= 0:
                                break

                        width_right = margin + 1

                        edge_widths[row, col] = width_left + width_right

                    # gradient angle = 0 (intensity increasing left-to-right);
                    # same search with the monotonicity tests mirrored
                    if quantized_angles[row, col] == 0:
                        for margin in range(100 + 1):
                            inner_border = (col - 1) - margin
                            outer_border = (col - 2) - margin

                            # outside image or intensity decreasing from left to right
                            if outer_border < 0 or (image[row, outer_border] - image[row, inner_border]) >= 0:
                                break

                        width_left = margin + 1

                        for margin in range(100 + 1):
                            inner_border = (col + 1) + margin
                            outer_border = (col + 2) + margin

                            # outside image or intensity decreasing from left to right
                            if outer_border >= img_width or (image[row, outer_border] - image[row, inner_border]) <= 0:
                                break

                        width_right = margin + 1

                        edge_widths[row, col] = width_right + width_left

    return edge_widths
210
211
212
def _calculate_sharpness_metric(image, edges, edge_widths):
    # type: (numpy.array, numpy.array, numpy.array) -> numpy.float64
    """
    Accumulate per-edge blur-detection probabilities over 64x64 blocks and
    return the CPBD sharpness value (mass of the probability histogram
    below the 64% bucket).
    """
    img_height, img_width = image.shape

    total_num_edges = 0
    # histogram of blur-detection probabilities, buckets 0..100 (percent)
    hist_pblur = np.zeros(101)

    # number of whole blocks that fit in each direction
    num_blocks_vertically = int(img_height / BLOCK_HEIGHT)
    num_blocks_horizontally = int(img_width / BLOCK_WIDTH)

    for block_row in range(num_blocks_vertically):
        rows = slice(BLOCK_HEIGHT * block_row, BLOCK_HEIGHT * (block_row + 1))

        for block_col in range(num_blocks_horizontally):
            cols = slice(BLOCK_WIDTH * block_col, BLOCK_WIDTH * (block_col + 1))

            if not is_edge_block(edges[rows, cols], THRESHOLD):
                continue

            # transpose simulates column-major boolean indexing
            # (np.rot90(np.flipud(x), 3) == x.T for a 2-D array),
            # then keep only the non-zero (edge) widths
            widths = edge_widths[rows, cols].T
            widths = widths[widths != 0]

            # just-noticeable width for this block's contrast level
            contrast = get_block_contrast(image[rows, cols])
            jnb_width = WIDTH_JNB[contrast]

            # probability of blur detection at each edge in the block
            prob_blur_detection = 1 - np.exp(-abs(widths / jnb_width) ** BETA)

            # fold the block's probabilities into the histogram
            for probability in prob_blur_detection:
                bucket = int(round(probability * 100))
                hist_pblur[bucket] += 1
                total_num_edges += 1

    # normalize the histogram into a pdf (skip when no edges were found)
    if total_num_edges > 0:
        hist_pblur = hist_pblur / total_num_edges

    # cumulative probability of blur detection below the 64% bucket
    return np.sum(hist_pblur[:64])
258
259
260
def is_edge_block(block, threshold):
    # type: (numpy.ndarray, float) -> bool
    """Decide whether the given block is an edge block.

    True when the fraction of non-zero (edge) pixels exceeds *threshold*.
    """
    edge_pixel_count = np.count_nonzero(block)
    return edge_pixel_count > block.size * threshold
264
265
266
def get_block_contrast(block):
    # type: (numpy.ndarray) -> int
    """Return the block's contrast: the max-min intensity range, truncated to int."""
    intensity_range = np.max(block) - np.min(block)
    return int(intensity_range)
269
270
271
def estimate_sharpness(image):
    """
    Reduce *image* to a single grayscale channel and return its sharpness
    metric.

    Multi-channel input is assumed to be BGR (OpenCV convention) and is
    converted with cv2; a 3-D single-channel input just drops its last axis.
    """
    if image.ndim == 3:
        if image.shape[2] > 1:
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        else:
            image = image[..., 0]

    return compute(image)
279
280