feat: 切换后端至PaddleOCR-NCNN,切换工程为CMake
1.项目后端整体迁移至PaddleOCR-NCNN算法,已通过基本的兼容性测试 2.工程改为使用CMake组织,后续为了更好地兼容第三方库,不再提供QMake工程 3.重整权利声明文件,重整代码工程,确保最小化侵权风险 Log: 切换后端至PaddleOCR-NCNN,切换工程为CMake Change-Id: I4d5d2c5d37505a4a24b389b1a4c5d12f17bfa38c
This commit is contained in:
237
3rdparty/opencv-4.5.4/modules/ts/misc/chart.py
vendored
Executable file
237
3rdparty/opencv-4.5.4/modules/ts/misc/chart.py
vendored
Executable file
@ -0,0 +1,237 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import testlog_parser, sys, os, xml, re
|
||||
from table_formatter import *
|
||||
from optparse import OptionParser
|
||||
|
||||
# Regex matching OpenCV size parameters such as "640x480".
# Fixed: patterns are raw strings now ("\d" in a plain string is an invalid
# escape sequence and warns on modern Python); the compiled regex is identical.
cvsize_re = re.compile(r"^\d+x\d+$")
# Regex matching OpenCV type names such as "CV_8UC3" (channel suffix optional).
cvtype_re = re.compile(r"^(CV_)(8U|8S|16U|16S|32S|32F|64F)(C\d{1,3})?$")

def keyselector(a):
    """Map a test parameter string to a sortable key.

    Sizes ("WxH") sort by pixel count; OpenCV type names sort by a synthetic
    integer combining depth and channel count; any other string sorts as-is.
    """
    if cvsize_re.match(a):
        size = [int(d) for d in a.split('x')]
        return size[0] * size[1]
    elif cvtype_re.match(a):
        if a.startswith("CV_"):
            a = a[3:]
        depth = 7
        if a[0] == '8':
            # (unsigned, signed) selected by the 'S'/'U' suffix character.
            depth = (0, 1) [a[1] == 'S']
        elif a[0] == '1':
            depth = (2, 3) [a[2] == 'S']
        elif a[2] == 'S':
            depth = 4
        elif a[0] == '3':
            depth = 5
        elif a[0] == '6':
            depth = 6
        cidx = a.find('C')
        if cidx < 0:
            channels = 1
        else:
            channels = int(a[a.index('C') + 1:])
        #return (depth & 7) + ((channels - 1) << 3)
        # Channels vary fastest; depth dominates the ordering.
        return ((channels-1) & 511) + (depth << 9)
    return a

# Convert numeric substrings to int so that "10" sorts after "9".
convert = lambda text: int(text) if text.isdigit() else text
# Natural-sort key: split the keyselector() result into text/number runs.
alphanum_keyselector = lambda key: [ convert(c) for c in re.split('([0-9]+)', str(keyselector(key))) ]
|
||||
|
||||
def getValueParams(test):
    """Split a test's "value_param" attribute into its top-level arguments.

    Commas nested inside parentheses do not split; one surrounding pair of
    parentheses is stripped.  Returns [] when the attribute is absent/empty.
    """
    raw = test.get("value_param")
    if not raw:
        return []
    # Strip at most one leading "(" and one trailing ")".
    if raw.startswith("("):
        raw = raw[1:]
    if raw.endswith(")"):
        raw = raw[:-1]

    pieces = []
    depth = 0          # current parenthesis nesting level
    chunk_start = 0    # start of the argument currently being scanned
    for pos, ch in enumerate(raw):
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
        elif ch == ',':
            assert(depth >= 0)
            if depth == 0:
                pieces.append(raw[chunk_start:pos].strip())
                chunk_start = pos + 1
    # Whatever follows the last top-level comma is the final argument.
    pieces.append(raw[chunk_start:].strip())
    return pieces
|
||||
|
||||
def nextPermutation(indexes, lists, x, y):
    """Advance `indexes` to the next combination, odometer-style, skipping
    the axis positions `x` and `y`.

    Mutates `indexes` in place; returns False once all combinations of the
    non-axis parameters have been exhausted.
    """
    pos = len(indexes) - 1
    while pos >= 0:
        # Skip the two positions that form the current table's axes.
        while pos == x or pos == y:
            pos -= 1
            if pos < 0:
                return False
        nxt = indexes[pos] + 1
        if nxt < len(lists[pos]):
            indexes[pos] = nxt
            return True
        # This digit wrapped around: reset it and carry leftwards.
        indexes[pos] = 0
        pos -= 1
    return False
|
||||
|
||||
def getTestWideName(sname, indexes, lists, x, y):
    """Build the display name for the current table: the test's short name
    plus its fixed parameter values, with the row/column axes shown as the
    placeholders "X" and "Y"."""
    parts = []
    for pos in range(len(indexes)):
        if pos == x:
            parts.append("X")
        elif pos == y:
            parts.append("Y")
        else:
            parts.append(lists[pos][indexes[pos]])
    return str(sname + "::(" + ", ".join(parts) + ")")
|
||||
|
||||
def getTest(stests, x, y, row, col):
    """Return the first test whose x-axis parameter equals `row` and whose
    y-axis parameter equals `col`, or None when the cell is empty."""
    matches = (t for t, params in stests if params[x] == row and params[y] == col)
    return next(matches, None)
|
||||
|
||||
if __name__ == "__main__":
    # Build 2-D pivot tables (one parameter on rows, one on columns) from an
    # OpenCV performance-test XML log.
    # Fixed for Python 3 compatibility: `print >> sys.stderr` statements are
    # replaced with sys.stderr.write() and dict.iterkeys() with keys();
    # output is byte-identical under Python 2.
    parser = OptionParser()
    parser.add_option("-o", "--output", dest="format", help="output results in text format (can be 'txt', 'html' or 'auto' - default)", metavar="FMT", default="auto")
    parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), us, ns or ticks)", metavar="UNITS", default="ms")
    parser.add_option("-m", "--metric", dest="metric", help="output metric", metavar="NAME", default="gmean")
    parser.add_option("-x", "", dest="x", help="argument number for rows", metavar="ROW", default=1)
    parser.add_option("-y", "", dest="y", help="argument number for columns", metavar="COL", default=0)
    parser.add_option("-f", "--filter", dest="filter", help="regex to filter tests", metavar="REGEX", default=None)
    (options, args) = parser.parse_args()

    if len(args) != 1:
        sys.stderr.write("Usage:\n %s <log_name1>.xml\n" % os.path.basename(sys.argv[0]))
        exit(1)

    options.generateHtml = detectHtmlOutputType(options.format)
    # Fall back to the default metric for unknown names; a trailing '%'
    # (relative form) is not meaningful for a single log.
    if options.metric not in metrix_table:
        options.metric = "gmean"
    if options.metric.endswith("%"):
        options.metric = options.metric[:-1]
    getter = metrix_table[options.metric][1]

    # Pair each parsed test case with its decoded value_param list.
    tests = testlog_parser.parseLogFile(args[0])
    if options.filter:
        expr = re.compile(options.filter)
        tests = [(t,getValueParams(t)) for t in tests if expr.search(str(t))]
    else:
        tests = [(t,getValueParams(t)) for t in tests]

    args[0] = os.path.basename(args[0])

    if not tests:
        sys.stderr.write("Error - no tests matched\n")
        exit(1)

    argsnum = len(tests[0][1])
    sname = tests[0][0].shortName()

    # arglists[i] collects the distinct values seen for parameter i.
    arglists = []
    for i in range(argsnum):
        arglists.append({})

    # names: suites with >1 parameter (chartable); names1: the rest.
    names = set()
    names1 = set()
    for pair in tests:
        sn = pair[0].shortName()
        if len(pair[1]) > 1:
            names.add(sn)
        else:
            names1.add(sn)
        if sn == sname:
            if len(pair[1]) != argsnum:
                sys.stderr.write("Error - unable to create chart tables for functions having different argument numbers\n")
                sys.exit(1)
            for i in range(argsnum):
                arglists[i][pair[1][i]] = 1

    # Charts only make sense for exactly one multi-parameter suite.
    if names1 or len(names) != 1:
        sys.stderr.write("Error - unable to create tables for functions from different test suits:\n")
        i = 1
        for name in sorted(names):
            sys.stderr.write("%4s: %s\n" % (i, name))
            i += 1
        if names1:
            sys.stderr.write("Other suits in this log (can not be chosen):\n")
            for name in sorted(names1):
                sys.stderr.write("%4s: %s\n" % (i, name))
                i += 1
        sys.exit(1)

    if argsnum < 2:
        sys.stderr.write("Error - tests from %s have less than 2 parameters\n" % sname)
        exit(1)

    # Natural-sort each parameter's value set (sizes/types sort numerically).
    for i in range(argsnum):
        arglists[i] = sorted([str(key) for key in arglists[i].keys()], key=alphanum_keyselector)

    if options.generateHtml and options.format != "moinwiki":
        htmlPrintHeader(sys.stdout, "Report %s for %s" % (args[0], sname))

    indexes = [0] * argsnum
    x = int(options.x)
    y = int(options.y)
    # Invalid or identical axis choices fall back to rows=param1, cols=param0.
    if x == y or x < 0 or y < 0 or x >= argsnum or y >= argsnum:
        x = 1
        y = 0

    # Emit one table per combination of the remaining (non-axis) parameters.
    while True:
        stests = []
        for pair in tests:
            t = pair[0]
            v = pair[1]
            for i in range(argsnum):
                if i != x and i != y:
                    if v[i] != arglists[i][indexes[i]]:
                        t = None
                        break
            if t:
                stests.append(pair)

        tbl = table(metrix_table[options.metric][0] + " for\n" + getTestWideName(sname, indexes, arglists, x, y))
        # "X\\Y" is the same string as the original "X\Y"; the raw-looking
        # form just avoids the invalid "\Y" escape warning.
        tbl.newColumn("x", "X\\Y")
        for col in arglists[y]:
            tbl.newColumn(col, col, align="center")
        for row in arglists[x]:
            tbl.newRow()
            tbl.newCell("x", row)
            for col in arglists[y]:
                case = getTest(stests, x, y, row, col)
                if case:
                    status = case.get("status")
                    if status != "run":
                        tbl.newCell(col, status, color = "red")
                    else:
                        val = getter(case, None, options.units)
                        if isinstance(val, float):
                            tbl.newCell(col, "%.2f %s" % (val, options.units), val)
                        else:
                            tbl.newCell(col, val, val)
                else:
                    tbl.newCell(col, "-")

        if options.generateHtml:
            tbl.htmlPrintTable(sys.stdout, options.format == "moinwiki")
        else:
            tbl.consolePrintTable(sys.stdout)
        if not nextPermutation(indexes, arglists, x, y):
            break

    if options.generateHtml and options.format != "moinwiki":
        htmlPrintFooter(sys.stdout)
|
386
3rdparty/opencv-4.5.4/modules/ts/misc/color.py
vendored
Executable file
386
3rdparty/opencv-4.5.4/modules/ts/misc/color.py
vendored
Executable file
@ -0,0 +1,386 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import math, os, sys
|
||||
|
||||
# CSS/web colour names mapped to their "#rrggbb" hex values.
# Fixed: the original literal listed "lightsalmon" twice (same value); the
# duplicate entry is removed, leaving the dict contents unchanged.
webcolors = {
    "indianred": "#cd5c5c",
    "lightcoral": "#f08080",
    "salmon": "#fa8072",
    "darksalmon": "#e9967a",
    "lightsalmon": "#ffa07a",
    "red": "#ff0000",
    "crimson": "#dc143c",
    "firebrick": "#b22222",
    "darkred": "#8b0000",
    "pink": "#ffc0cb",
    "lightpink": "#ffb6c1",
    "hotpink": "#ff69b4",
    "deeppink": "#ff1493",
    "mediumvioletred": "#c71585",
    "palevioletred": "#db7093",
    "coral": "#ff7f50",
    "tomato": "#ff6347",
    "orangered": "#ff4500",
    "darkorange": "#ff8c00",
    "orange": "#ffa500",
    "gold": "#ffd700",
    "yellow": "#ffff00",
    "lightyellow": "#ffffe0",
    "lemonchiffon": "#fffacd",
    "lightgoldenrodyellow": "#fafad2",
    "papayawhip": "#ffefd5",
    "moccasin": "#ffe4b5",
    "peachpuff": "#ffdab9",
    "palegoldenrod": "#eee8aa",
    "khaki": "#f0e68c",
    "darkkhaki": "#bdb76b",
    "lavender": "#e6e6fa",
    "thistle": "#d8bfd8",
    "plum": "#dda0dd",
    "violet": "#ee82ee",
    "orchid": "#da70d6",
    "fuchsia": "#ff00ff",
    "magenta": "#ff00ff",
    "mediumorchid": "#ba55d3",
    "mediumpurple": "#9370db",
    "blueviolet": "#8a2be2",
    "darkviolet": "#9400d3",
    "darkorchid": "#9932cc",
    "darkmagenta": "#8b008b",
    "purple": "#800080",
    "indigo": "#4b0082",
    "darkslateblue": "#483d8b",
    "slateblue": "#6a5acd",
    "mediumslateblue": "#7b68ee",
    "greenyellow": "#adff2f",
    "chartreuse": "#7fff00",
    "lawngreen": "#7cfc00",
    "lime": "#00ff00",
    "limegreen": "#32cd32",
    "palegreen": "#98fb98",
    "lightgreen": "#90ee90",
    "mediumspringgreen": "#00fa9a",
    "springgreen": "#00ff7f",
    "mediumseagreen": "#3cb371",
    "seagreen": "#2e8b57",
    "forestgreen": "#228b22",
    "green": "#008000",
    "darkgreen": "#006400",
    "yellowgreen": "#9acd32",
    "olivedrab": "#6b8e23",
    "olive": "#808000",
    "darkolivegreen": "#556b2f",
    "mediumaquamarine": "#66cdaa",
    "darkseagreen": "#8fbc8f",
    "lightseagreen": "#20b2aa",
    "darkcyan": "#008b8b",
    "teal": "#008080",
    "aqua": "#00ffff",
    "cyan": "#00ffff",
    "lightcyan": "#e0ffff",
    "paleturquoise": "#afeeee",
    "aquamarine": "#7fffd4",
    "turquoise": "#40e0d0",
    "mediumturquoise": "#48d1cc",
    "darkturquoise": "#00ced1",
    "cadetblue": "#5f9ea0",
    "steelblue": "#4682b4",
    "lightsteelblue": "#b0c4de",
    "powderblue": "#b0e0e6",
    "lightblue": "#add8e6",
    "skyblue": "#87ceeb",
    "lightskyblue": "#87cefa",
    "deepskyblue": "#00bfff",
    "dodgerblue": "#1e90ff",
    "cornflowerblue": "#6495ed",
    "royalblue": "#4169e1",
    "blue": "#0000ff",
    "mediumblue": "#0000cd",
    "darkblue": "#00008b",
    "navy": "#000080",
    "midnightblue": "#191970",
    "cornsilk": "#fff8dc",
    "blanchedalmond": "#ffebcd",
    "bisque": "#ffe4c4",
    "navajowhite": "#ffdead",
    "wheat": "#f5deb3",
    "burlywood": "#deb887",
    "tan": "#d2b48c",
    "rosybrown": "#bc8f8f",
    "sandybrown": "#f4a460",
    "goldenrod": "#daa520",
    "darkgoldenrod": "#b8860b",
    "peru": "#cd853f",
    "chocolate": "#d2691e",
    "saddlebrown": "#8b4513",
    "sienna": "#a0522d",
    "brown": "#a52a2a",
    "maroon": "#800000",
    "white": "#ffffff",
    "snow": "#fffafa",
    "honeydew": "#f0fff0",
    "mintcream": "#f5fffa",
    "azure": "#f0ffff",
    "aliceblue": "#f0f8ff",
    "ghostwhite": "#f8f8ff",
    "whitesmoke": "#f5f5f5",
    "seashell": "#fff5ee",
    "beige": "#f5f5dc",
    "oldlace": "#fdf5e6",
    "floralwhite": "#fffaf0",
    "ivory": "#fffff0",
    "antiquewhite": "#faebd7",
    "linen": "#faf0e6",
    "lavenderblush": "#fff0f5",
    "mistyrose": "#ffe4e1",
    "gainsboro": "#dcdcdc",
    "lightgrey": "#d3d3d3",
    "silver": "#c0c0c0",
    "darkgray": "#a9a9a9",
    "gray": "#808080",
    "dimgray": "#696969",
    "lightslategray": "#778899",
    "slategray": "#708090",
    "darkslategray": "#2f4f4f",
    "black": "#000000",
}
|
||||
|
||||
# 16-entry console palette used to approximate arbitrary colours.
# Windows uses the classic conhost attribute colours; elsewhere the Tango
# terminal palette (dark 8 + bright 8) is assumed.
if os.name == "nt":
    consoleColors = [
        "#000000", #{ 0, 0, 0 },//0 - black
        "#000080", #{ 0, 0, 128 },//1 - navy
        "#008000", #{ 0, 128, 0 },//2 - green
        "#008080", #{ 0, 128, 128 },//3 - teal
        "#800000", #{ 128, 0, 0 },//4 - maroon
        "#800080", #{ 128, 0, 128 },//5 - purple
        "#808000", #{ 128, 128, 0 },//6 - olive
        "#C0C0C0", #{ 192, 192, 192 },//7 - silver
        "#808080", #{ 128, 128, 128 },//8 - gray
        "#0000FF", #{ 0, 0, 255 },//9 - blue
        "#00FF00", #{ 0, 255, 0 },//a - lime
        "#00FFFF", #{ 0, 255, 255 },//b - cyan
        "#FF0000", #{ 255, 0, 0 },//c - red
        "#FF00FF", #{ 255, 0, 255 },//d - magenta
        "#FFFF00", #{ 255, 255, 0 },//e - yellow
        "#FFFFFF", #{ 255, 255, 255 } //f - white
    ]
else:
    # First 8 entries: dark variants; last 8: bright variants.
    consoleColors = [
        "#2e3436",
        "#cc0000",
        "#4e9a06",
        "#c4a000",
        "#3465a4",
        "#75507b",
        "#06989a",
        "#d3d7cf",
        "#ffffff",

        "#555753",
        "#ef2929",
        "#8ae234",
        "#fce94f",
        "#729fcf",
        "#ad7fa8",
        "#34e2e2",
        "#eeeeec",
    ]
|
||||
|
||||
def RGB2LAB(r, g, b):
    """Convert an RGB triple (components 0..255) to CIE L*a*b*.

    All-zero input skips the normalisation, which still yields (0, 0, 0).
    """
    # Normalise components to [0, 1].
    if max(r, g, b):
        r /= 255.
        g /= 255.
        b /= 255.

    # Linear RGB -> XYZ, pre-divided by the D65 white point:
    #[X * 0.950456]   [0.412453 0.357580 0.180423]   [R]
    #[Y           ] = [0.212671 0.715160 0.072169] * [G]
    #[Z * 1.088754]   [0.019334 0.119193 0.950227]   [B]
    X = (0.412453 * r + 0.357580 * g + 0.180423 * b) / 0.950456
    Y = (0.212671 * r + 0.715160 * g + 0.072169 * b)
    Z = (0.019334 * r + 0.119193 * g + 0.950227 * b) / 1.088754

    T = 0.008856  # linearity threshold of the Lab transfer function

    def transfer(v):
        # Cube root above the threshold, linear approximation below it.
        if v > T:
            return math.pow(v, 1./3.)
        return 7.787 * v + 16./116.

    fX = transfer(X)
    fY = transfer(Y)
    fZ = transfer(Z)

    # L has its own piecewise definition based on Y.
    if Y > T:
        L = 116. * math.pow(Y, 1./3.) - 16.0
    else:
        L = 903.3 * Y

    a = 500. * (fX - fY)
    b = 200. * (fY - fZ)
    return (L, a, b)
|
||||
|
||||
def colorDistance(r1, g1, b1 = None, r2 = None, g2 = None, b2 = None):
    """CIE94 colour difference between two RGB colours.

    Accepts either two tuples -- colorDistance(rgb1, rgb2) -- or six scalar
    components -- colorDistance(r1, g1, b1, r2, g2, b2).
    """
    tuple_form = (type(r1) == tuple and type(g1) == tuple and b1 is None
                  and r2 is None and g2 is None and b2 is None)
    if tuple_form:
        (l1, a1, b1) = RGB2LAB(*r1)
        (l2, a2, b2) = RGB2LAB(*g1)
    else:
        (l1, a1, b1) = RGB2LAB(r1, g1, b1)
        (l2, a2, b2) = RGB2LAB(r2, g2, b2)
    # CIE94: lightness, chroma and hue differences weighted separately.
    dl = l1 - l2
    chroma1 = math.sqrt(a1 * a1 + b1 * b1)
    chroma2 = math.sqrt(a2 * a2 + b2 * b2)
    dC = chroma1 - chroma2
    da = a1 - a2
    db = b1 - b2
    # Clamp to 0 so float round-off can't make the radicand negative.
    dH = math.sqrt(max(0, da * da + db * db - dC * dC))
    Kl = 1
    K1 = 0.045
    K2 = 0.015

    s1 = dl / Kl
    s2 = dC / (1. + K1 * chroma1)
    s3 = dH / (1. + K2 * chroma1)
    return math.sqrt(s1 * s1 + s2 * s2 + s3 * s3)
|
||||
|
||||
def parseHexColor(col):
    """Parse "#rgb" or "#rrggbb" into an (r, g, b) integer tuple.

    Malformed input (wrong length or missing '#') yields black instead of
    raising.
    """
    # Bug fix: the original check joined the conditions with `and`, so e.g.
    # a 4-character string without '#' slipped through and had its first
    # character silently treated as the '#'.
    if (len(col) != 4 and len(col) != 7) or not col.startswith("#"):
        return (0,0,0)
    if len(col) == 4:
        # Short form: each hex digit is doubled ("#1af" -> "11aaff").
        r = col[1]*2
        g = col[2]*2
        b = col[3]*2
    else:
        r = col[1:3]
        g = col[3:5]
        b = col[5:7]
    return (int(r,16), int(g,16), int(b,16))
|
||||
|
||||
def getColor(col):
    """Resolve a colour given as a web-colour name, a hex string, or a
    ready-made tuple (passed through unchanged)."""
    if not isinstance(col, str):
        return col
    key = col.lower()
    # Named colours resolve through the webcolors table; anything else is
    # treated as a hex string.
    if key in webcolors:
        return parseHexColor(webcolors[key])
    return parseHexColor(col)
|
||||
|
||||
def getNearestConsoleColor(col):
    """Return the index of the console palette entry closest to `col`
    under the CIE94 colour distance."""
    target = getColor(col)
    best_idx = 0
    best_dist = colorDistance(target, getColor(consoleColors[0]))
    for idx in range(len(consoleColors)):
        dist = colorDistance(target, getColor(consoleColors[idx]))
        # Strict comparison keeps the first of any equally-close entries.
        if dist < best_dist:
            best_dist = dist
            best_idx = idx
    return best_idx
|
||||
|
||||
# Windows-only colour support: drives the legacy console attribute API via
# ctypes.  Only defined when running on Windows (os.name == 'nt').
if os.name == 'nt':
    import msvcrt
    from ctypes import windll, Structure, c_short, c_ushort, byref
    SHORT = c_short
    WORD = c_ushort

    class COORD(Structure):
        """ctypes mirror of the Win32 COORD struct."""
        _fields_ = [
            ("X", SHORT),
            ("Y", SHORT)]

    class SMALL_RECT(Structure):
        """ctypes mirror of the Win32 SMALL_RECT struct."""
        _fields_ = [
            ("Left", SHORT),
            ("Top", SHORT),
            ("Right", SHORT),
            ("Bottom", SHORT)]

    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """ctypes mirror of the Win32 CONSOLE_SCREEN_BUFFER_INFO struct."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", WORD),
            ("srWindow", SMALL_RECT),
            ("dwMaximumWindowSize", COORD)]

    class winConsoleColorizer(object):
        """Writes coloured text by temporarily switching the console's text
        attribute around each write."""

        def __init__(self, stream):
            # OS handle for the console backing this Python stream.
            self.handle = msvcrt.get_osfhandle(stream.fileno())
            # Hard-coded light-gray default instead of querying the console.
            self.default_attrs = 7#self.get_text_attr()
            self.stream = stream

        def get_text_attr(self):
            """Query the console's current text attribute word."""
            csbi = CONSOLE_SCREEN_BUFFER_INFO()
            windll.kernel32.GetConsoleScreenBufferInfo(self.handle, byref(csbi))
            return csbi.wAttributes

        def set_text_attr(self, color):
            """Set the console's text attribute for subsequent output."""
            windll.kernel32.SetConsoleTextAttribute(self.handle, color)

        def write(self, *text, **attrs):
            """Write the items space-separated; `color=...` selects the
            nearest console palette colour for the duration of the write."""
            if not text:
                return
            color = attrs.get("color", None)
            if color:
                col = getNearestConsoleColor(color)
                # Flush before changing the attribute so pending buffered
                # output keeps its previous colour.
                self.stream.flush()
                self.set_text_attr(col)
            self.stream.write(" ".join([str(t) for t in text]))
            if color:
                self.stream.flush()
                self.set_text_attr(self.default_attrs)
|
||||
|
||||
class dummyColorizer(object):
    """Colour-less fallback writer: forwards text to the stream and ignores
    any colour attributes."""

    def __init__(self, stream):
        self.stream = stream

    def write(self, *text, **attrs):
        # `attrs` (e.g. color=...) is accepted for interface parity but unused.
        if text:
            self.stream.write(" ".join(str(piece) for piece in text))
|
||||
|
||||
class asciiSeqColorizer(object):
    """Writes coloured text using ANSI escape sequences (Unix terminals)."""

    RESET_SEQ = "\033[0m"
    #BOLD_SEQ = "\033[1m"
    ITALIC_SEQ = "\033[3m"
    UNDERLINE_SEQ = "\033[4m"
    STRIKEOUT_SEQ = "\033[9m"
    COLOR_SEQ0 = "\033[00;%dm" #dark
    COLOR_SEQ1 = "\033[01;%dm" #bold and light

    def __init__(self, stream):
        self.stream = stream

    def get_seq(self, code):
        """ANSI colour sequence for a palette index: 0-8 map to the dark
        colours, 9 and above to the bold/bright variants."""
        if code > 8:
            return self.__class__.COLOR_SEQ1 % (30 + code - 9)
        return self.__class__.COLOR_SEQ0 % (30 + code)

    def write(self, *text, **attrs):
        """Write the items space-separated; `color=...` wraps them in the
        escape sequence of the nearest palette colour plus a reset."""
        if not text:
            return
        color = attrs.get("color", None)
        if color:
            self.stream.write(self.get_seq(getNearestConsoleColor(color)))
        self.stream.write(" ".join(str(piece) for piece in text))
        if color:
            self.stream.write(self.__class__.RESET_SEQ)
|
||||
|
||||
|
||||
def getColorizer(stream):
    """Pick the colourizer suited to `stream`: the Windows console API or
    ANSI sequences for interactive terminals, a plain pass-through writer
    for pipes and files."""
    if not stream.isatty():
        return dummyColorizer(stream)
    if os.name == "nt":
        return winConsoleColorizer(stream)
    return asciiSeqColorizer(stream)
|
45
3rdparty/opencv-4.5.4/modules/ts/misc/concatlogs.py
vendored
Executable file
45
3rdparty/opencv-4.5.4/modules/ts/misc/concatlogs.py
vendored
Executable file
@ -0,0 +1,45 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from optparse import OptionParser
|
||||
import glob, sys, os, re
|
||||
|
||||
if __name__ == "__main__":
    # Concatenate several OpenCV performance-report HTML files into one
    # document: the first readable file is the base, and each later file's
    # <tbody> content is spliced into it.
    parser = OptionParser()
    parser.add_option("-o", "--output", dest="output", help="output file name", metavar="FILENAME", default=None)
    (options, args) = parser.parse_args()

    if not options.output:
        sys.stderr.write("Error: output file name is not provided")
        exit(-1)

    # Expand glob patterns; plain arguments are taken verbatim.
    files = []
    for arg in args:
        if ("*" in arg) or ("?" in arg):
            files.extend([os.path.abspath(f) for f in glob.glob(arg)])
        else:
            files.append(os.path.abspath(arg))

    html = None
    for f in sorted(files):
        try:
            # Fixed: `with` guarantees the handle is closed (the original
            # leaked every handle and had a dead `if not fobj:` branch).
            with open(f) as fobj:
                text = fobj.read()
            if not html:
                html = text
                continue
            # Append this file's content (from its <tbody> onwards, with
            # whitespace collapsed) before the base document's </tbody>.
            idx1 = text.find("<tbody>") + len("<tbody>")
            idx2 = html.rfind("</tbody>")
            html = html[:idx2] + re.sub(r"[ \t\n\r]+", " ", text[idx1:])
        except Exception:
            # Best-effort merge: unreadable/broken files are skipped.
            pass

    if html:
        # Fixed: the <title> start offset is now located in `html` itself;
        # the original computed it from the last file read, which only
        # worked when all reports shared identical leading markup.
        idx1 = html.find("<title>") + len("<title>")
        idx2 = html.find("</title>")
        html = html[:idx1] + "OpenCV performance testing report" + html[idx2:]
        with open(options.output, "w") as out:
            out.write(html)
    else:
        sys.stderr.write("Error: no input data")
        exit(-1)
|
160
3rdparty/opencv-4.5.4/modules/ts/misc/perf_tests_timing.py
vendored
Normal file
160
3rdparty/opencv-4.5.4/modules/ts/misc/perf_tests_timing.py
vendored
Normal file
@ -0,0 +1,160 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from __future__ import print_function
|
||||
import testlog_parser, sys, os, xml, glob, re
|
||||
from table_formatter import *
|
||||
from optparse import OptionParser
|
||||
from operator import itemgetter, attrgetter
|
||||
from summary import getSetName, alphanum_keyselector
|
||||
import re
|
||||
|
||||
if __name__ == "__main__":
    # Summarise per-testsuite run times (and failure counts) from one or
    # more performance-test XML logs.
    # Fixed for Python 3: dict.iterkeys() -> keys(); also removed a
    # guaranteed NameError (`err` was referenced in the missing-file branch
    # before any exception existed).
    usage = "%prog <log_name>.xml [...]"
    parser = OptionParser(usage = usage)

    parser.add_option("-o", "--output", dest = "format",
        help = "output results in text format (can be 'txt', 'html' or 'auto' - default)",
        metavar = 'FMT', default = 'auto')

    parser.add_option("--failed-only", action = "store_true", dest = "failedOnly",
        help = "print only failed tests", default = False)

    (options, args) = parser.parse_args()

    options.generateHtml = detectHtmlOutputType(options.format)

    files = []
    testsuits = [] # testsuit module, name, time, num, flag for failed tests
    overall_time = 0

    # `seen` filters duplicate paths; `not seen.add(x)` records as it tests.
    seen = set()
    for arg in args:
        if ("*" in arg) or ("?" in arg):
            flist = [os.path.abspath(f) for f in glob.glob(arg)]
            flist = sorted(flist, key= lambda text: str(text).replace("M", "_"))
            files.extend([ x for x in flist if x not in seen and not seen.add(x)])
        else:
            fname = os.path.abspath(arg)
            if fname not in seen and not seen.add(fname):
                files.append(fname)

        file = os.path.abspath(fname)
        if not os.path.isfile(file):
            # Fixed: the original interpolated an undefined `err` here.
            sys.stderr.write("Error reading \"" + file + "\" - file does not exist" + os.linesep)
            parser.print_help()
            exit(0)

        # Module name is the prefix of the file name up to the first '_'.
        fname = os.path.basename(fname)
        find_module_name = re.search(r'([^_]*)', fname)
        module_name = find_module_name.group(0)

        test_sets = []
        try:
            tests = testlog_parser.parseLogFile(file)
            if tests:
                test_sets.append((os.path.basename(file), tests))
        except IOError as err:
            sys.stderr.write("IOError reading \"" + file + "\" - " + str(err) + os.linesep)
        except xml.parsers.expat.ExpatError as err:
            sys.stderr.write("ExpatError reading \"" + file + "\" - " + str(err) + os.linesep)

        if not test_sets:
            continue

        # find matches
        setsCount = len(test_sets)
        test_cases = {}

        name_extractor = lambda name: str(name)

        for i in range(setsCount):
            for case in test_sets[i][1]:
                name = name_extractor(case)
                if name not in test_cases:
                    test_cases[name] = [None] * setsCount
                test_cases[name][i] = case

        # Accumulate time/count/failures per test suite (group of cases
        # sharing a short name); cases arrive grouped via the natural sort.
        prevGroupName = None
        suit_time = 0
        suit_num = 0
        fails_num = 0
        for name in sorted(test_cases.keys(), key=alphanum_keyselector):
            cases = test_cases[name]

            groupName = next(c for c in cases if c).shortName()
            if groupName != prevGroupName:
                if prevGroupName != None:
                    suit_time = suit_time/60 #from seconds to minutes
                    testsuits.append({'module': module_name, 'name': prevGroupName, \
                        'time': suit_time, 'num': suit_num, 'failed': fails_num})
                    overall_time += suit_time
                    suit_time = 0
                    suit_num = 0
                    fails_num = 0
                prevGroupName = groupName

            for i in range(setsCount):
                case = cases[i]
                if not case is None:
                    suit_num += 1
                    if case.get('status') == 'run':
                        suit_time += case.get('time')
                    if case.get('status') == 'failed':
                        fails_num += 1

        # last testsuit processing
        suit_time = suit_time/60
        testsuits.append({'module': module_name, 'name': prevGroupName, \
            'time': suit_time, 'num': suit_num, 'failed': fails_num})
        overall_time += suit_time

    if len(testsuits)==0:
        exit(0)

    tbl = table()
    rows = 0

    if not options.failedOnly:
        tbl.newColumn('module', 'Module', align = 'left', cssclass = 'col_name')
        tbl.newColumn('name', 'Testsuit', align = 'left', cssclass = 'col_name')
        tbl.newColumn('time', 'Time (min)', align = 'center', cssclass = 'col_name')
        tbl.newColumn('num', 'Num of tests', align = 'center', cssclass = 'col_name')
        tbl.newColumn('failed', 'Failed', align = 'center', cssclass = 'col_name')

        # rows, slowest suites first
        for suit in sorted(testsuits, key = lambda suit: suit['time'], reverse = True):
            tbl.newRow()
            tbl.newCell('module', suit['module'])
            tbl.newCell('name', suit['name'])
            tbl.newCell('time', formatValue(suit['time'], '', ''), suit['time'])
            tbl.newCell('num', suit['num'])
            if (suit['failed'] != 0):
                tbl.newCell('failed', suit['failed'])
            else:
                tbl.newCell('failed', ' ')
            rows += 1

    else:
        tbl.newColumn('module', 'Module', align = 'left', cssclass = 'col_name')
        tbl.newColumn('name', 'Testsuit', align = 'left', cssclass = 'col_name')
        tbl.newColumn('failed', 'Failed', align = 'center', cssclass = 'col_name')

        # rows: only suites that had failures
        for suit in sorted(testsuits, key = lambda suit: suit['time'], reverse = True):
            if (suit['failed'] != 0):
                tbl.newRow()
                tbl.newCell('module', suit['module'])
                tbl.newCell('name', suit['name'])
                tbl.newCell('failed', suit['failed'])
                rows += 1

    # output table
    if rows:
        if options.generateHtml:
            tbl.htmlPrintTable(sys.stdout)
            htmlPrintFooter(sys.stdout)
        else:
            if not options.failedOnly:
                print('\nOverall time: %.2f min\n' % overall_time)
            tbl.consolePrintTable(sys.stdout)
            print(2 * '\n')
|
103
3rdparty/opencv-4.5.4/modules/ts/misc/report.py
vendored
Executable file
103
3rdparty/opencv-4.5.4/modules/ts/misc/report.py
vendored
Executable file
@ -0,0 +1,103 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import testlog_parser, sys, os, xml, re, glob
|
||||
from table_formatter import *
|
||||
from optparse import OptionParser
|
||||
|
||||
if __name__ == "__main__":
    # Render selected metrics of performance-test XML logs as one table.
    # Fixed for Python 3 compatibility: `print >> sys.stderr` replaced with
    # sys.stderr.write(); the bare `except:` narrowed to Exception.
    parser = OptionParser()
    parser.add_option("-o", "--output", dest="format", help="output results in text format (can be 'txt', 'html' or 'auto' - default)", metavar="FMT", default="auto")
    parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), us, ns or ticks)", metavar="UNITS", default="ms")
    parser.add_option("-c", "--columns", dest="columns", help="comma-separated list of columns to show", metavar="COLS", default="")
    parser.add_option("-f", "--filter", dest="filter", help="regex to filter tests", metavar="REGEX", default=None)
    parser.add_option("", "--show-all", action="store_true", dest="showall", default=False, help="also include empty and \"notrun\" lines")
    (options, args) = parser.parse_args()

    if len(args) < 1:
        sys.stderr.write("Usage:\n %s <log_name1>.xml\n" % os.path.basename(sys.argv[0]))
        exit(0)

    options.generateHtml = detectHtmlOutputType(options.format)

    # expand wildcards and filter duplicates
    files = []
    files1 = []
    for arg in args:
        if ("*" in arg) or ("?" in arg):
            files1.extend([os.path.abspath(f) for f in glob.glob(arg)])
        else:
            files.append(os.path.abspath(arg))
    seen = set()
    files = [ x for x in files if x not in seen and not seen.add(x)]
    files.extend((set(files1) - set(files)))
    args = files

    # load test data; unparsable logs are skipped (best-effort)
    tests = []
    files = []
    for arg in set(args):
        try:
            cases = testlog_parser.parseLogFile(arg)
            if cases:
                files.append(os.path.basename(arg))
                tests.extend(cases)
        except Exception:
            pass

    if options.filter:
        expr = re.compile(options.filter)
        tests = [t for t in tests if expr.search(str(t))]

    tbl = table(", ".join(files))
    # Column selection: only known metrics, relative ('%') forms excluded.
    if options.columns:
        metrics = [s.strip() for s in options.columns.split(",")]
        metrics = [m for m in metrics if m and not m.endswith("%") and m in metrix_table]
    else:
        metrics = None
    if not metrics:
        metrics = ["name", "samples", "outliers", "min", "median", "gmean", "mean", "stddev"]
    if "name" not in metrics:
        metrics.insert(0, "name")

    for m in metrics:
        if m == "name":
            tbl.newColumn(m, metrix_table[m][0])
        else:
            tbl.newColumn(m, metrix_table[m][0], align = "center")

    # Without --show-all, rows for "notrun" cases are reused/trimmed so they
    # do not appear in the output.
    needNewRow = True
    for case in sorted(tests, key=lambda x: str(x)):
        if needNewRow:
            tbl.newRow()
            if not options.showall:
                needNewRow = False
        status = case.get("status")
        if status != "run":
            if status != "notrun":
                needNewRow = True
                for m in metrics:
                    if m == "name":
                        tbl.newCell(m, str(case))
                    else:
                        tbl.newCell(m, status, color = "red")
        else:
            needNewRow = True
            for m in metrics:
                val = metrix_table[m][1](case, None, options.units)
                if isinstance(val, float):
                    tbl.newCell(m, "%.2f %s" % (val, options.units), val)
                else:
                    tbl.newCell(m, val, val)
    if not needNewRow:
        tbl.trimLastRow()

    # output table
    if options.generateHtml:
        if options.format == "moinwiki":
            tbl.htmlPrintTable(sys.stdout, True)
        else:
            htmlPrintHeader(sys.stdout, "Report %s tests from %s" % (len(tests), ", ".join(files)))
            tbl.htmlPrintTable(sys.stdout)
            htmlPrintFooter(sys.stdout)
    else:
        tbl.consolePrintTable(sys.stdout)
|
143
3rdparty/opencv-4.5.4/modules/ts/misc/run.py
vendored
Executable file
143
3rdparty/opencv-4.5.4/modules/ts/misc/run.py
vendored
Executable file
@ -0,0 +1,143 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import argparse
|
||||
import logging
|
||||
import datetime
|
||||
from run_utils import Err, CMakeCache, log, execute
|
||||
from run_suite import TestSuite
|
||||
from run_android import AndroidTestSuite
|
||||
|
||||
epilog = '''
NOTE:
Additional options starting with "--gtest_" and "--perf_" will be passed directly to the test executables.
'''

if __name__ == "__main__":

    # log.basicConfig(format='[%(levelname)s] %(message)s', level = log.DEBUG)
    # log.basicConfig(format='[%(levelname)s] %(message)s', level = log.INFO)

    # Command line interface.  Unrecognised options that start with
    # --gtest_/--perf_/--test_ are forwarded verbatim to the test binaries
    # (collected into test_args below); anything else is rejected.
    parser = argparse.ArgumentParser(
        description='OpenCV test runner script',
        epilog=epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("build_path", nargs='?', default=".", help="Path to build directory (should contain CMakeCache.txt, default is current) or to directory with tests (all platform checks will be disabled in this case)")
    parser.add_argument("-t", "--tests", metavar="MODULES", default="", help="Comma-separated list of modules to test (example: -t core,imgproc,java)")
    parser.add_argument("-b", "--blacklist", metavar="MODULES", default="", help="Comma-separated list of modules to exclude from test (example: -b java)")
    parser.add_argument("-a", "--accuracy", action="store_true", default=False, help="Look for accuracy tests instead of performance tests")
    parser.add_argument("--check", action="store_true", default=False, help="Shortcut for '--perf_min_samples=1 --perf_force_samples=1'")
    parser.add_argument("-w", "--cwd", metavar="PATH", default=".", help="Working directory for tests (default is current)")
    parser.add_argument("--list", action="store_true", default=False, help="List available tests (executables)")
    parser.add_argument("--list_short", action="store_true", default=False, help="List available tests (aliases)")
    parser.add_argument("--list_short_main", action="store_true", default=False, help="List available tests (main repository, aliases)")
    parser.add_argument("--configuration", metavar="CFG", default=None, help="Force Debug or Release configuration (for Visual Studio and Java tests build)")
    parser.add_argument("-n", "--dry_run", action="store_true", help="Do not run the tests")
    parser.add_argument("-v", "--verbose", action="store_true", default=False, help="Print more debug information")

    # Valgrind
    parser.add_argument("--valgrind", action="store_true", default=False, help="Run C++ tests in valgrind")
    parser.add_argument("--valgrind_supp", metavar="FILE", action='append', help="Path to valgrind suppression file (example: --valgrind_supp opencv/platforms/scripts/valgrind.supp)")
    parser.add_argument("--valgrind_opt", metavar="OPT", action="append", default=[], help="Add command line option to valgrind (example: --valgrind_opt=--leak-check=full)")

    # QEMU
    parser.add_argument("--qemu", default="", help="Specify qemu binary and base parameters")

    # Android
    parser.add_argument("--android", action="store_true", default=False, help="Android: force all tests to run on device")
    parser.add_argument("--android_sdk", metavar="PATH", help="Android: path to SDK to use adb and aapt tools")
    parser.add_argument("--android_test_data_path", metavar="PATH", default="/sdcard/opencv_testdata/", help="Android: path to testdata on device")
    parser.add_argument("--android_env", action='append', help="Android: add environment variable (NAME=VALUE)")
    parser.add_argument("--android_propagate_opencv_env", action="store_true", default=False, help="Android: propagate OPENCV* environment variables")
    parser.add_argument("--serial", metavar="serial number", default="", help="Android: directs command to the USB device or emulator with the given serial number")
    parser.add_argument("--package", metavar="package", default="", help="Java: run JUnit tests for specified module or Android package")
    parser.add_argument("--java_test_exclude", metavar="java_test_exclude", default="", help="Java: Filter out specific JUnit tests")

    parser.add_argument("--trace", action="store_true", default=False, help="Trace: enable OpenCV tracing")
    parser.add_argument("--trace_dump", metavar="trace_dump", default=-1, help="Trace: dump highlight calls (specify max entries count, 0 - dump all)")

    args, other_args = parser.parse_known_args()

    log.setLevel(logging.DEBUG if args.verbose else logging.INFO)

    # Split leftover args: recognised test options are forwarded, anything
    # else is reported as an error.
    test_args = [a for a in other_args if a.startswith("--perf_") or a.startswith("--test_") or a.startswith("--gtest_")]
    bad_args = [a for a in other_args if a not in test_args]
    if len(bad_args) > 0:
        log.error("Error: Bad arguments: %s", bad_args)
        exit(1)

    # 'test' == accuracy suite, 'perf' == performance suite
    args.mode = "test" if args.accuracy else "perf"

    # Build the environment dictionary forwarded to tests on an Android device.
    android_env = []
    if args.android_env:
        android_env.extend([entry.split("=", 1) for entry in args.android_env])
    if args.android_propagate_opencv_env:
        android_env.extend([entry for entry in os.environ.items() if entry[0].startswith('OPENCV')])
    android_env = dict(android_env)
    if args.android_test_data_path:
        android_env['OPENCV_TEST_DATA_PATH'] = args.android_test_data_path

    if args.valgrind:
        # fail early when valgrind is requested but not installed
        try:
            ver = execute(["valgrind", "--version"], silent=True)
            log.debug("Using %s", ver)
        except OSError as e:
            log.error("Failed to run valgrind: %s", e)
            exit(1)

    # NOTE(review): args.build_path is a string, so len(...) != 1 holds for
    # any path longer than one character and --gtest_output is almost always
    # stripped here.  This looks copied from code operating on a list of
    # tests (cf. runTests in run_suite.py) -- confirm intent.
    if len(args.build_path) != 1:
        test_args = [a for a in test_args if not a.startswith("--gtest_output=")]

    if args.check:
        # --check implies quick single-sample runs with sanity verification
        if not [a for a in test_args if a.startswith("--perf_min_samples=")]:
            test_args.extend(["--perf_min_samples=1"])
        if not [a for a in test_args if a.startswith("--perf_force_samples=")]:
            test_args.extend(["--perf_force_samples=1"])
        if not [a for a in test_args if a.startswith("--perf_verify_sanity")]:
            test_args.extend(["--perf_verify_sanity"])

    if bool(os.environ.get('BUILD_PRECOMMIT', None)):
        test_args.extend(["--skip_unstable=1"])

    ret = 0
    logs = []
    # timestamp used to make log file names unique per run
    stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    path = args.build_path
    try:
        if not os.path.isdir(path):
            raise Err("Not a directory (should contain CMakeCache.txt ot test executables)")
        cache = CMakeCache(args.configuration)
        fname = os.path.join(path, "CMakeCache.txt")

        if os.path.isfile(fname):
            log.debug("Reading cmake cache file: %s", fname)
            cache.read(path, fname)
        else:
            # plain folder with test executables, no build metadata available
            log.debug("Assuming folder contains tests: %s", path)
            cache.setDummy(path)

        if args.android or cache.getOS() == "android":
            log.debug("Creating Android test runner")
            suite = AndroidTestSuite(args, cache, stamp, android_env)
        else:
            log.debug("Creating native test runner")
            suite = TestSuite(args, cache, stamp)

        if args.list or args.list_short or args.list_short_main:
            suite.listTests(args.list_short or args.list_short_main, args.list_short_main)
        else:
            log.debug("Running tests in '%s', working dir: '%s'", path, args.cwd)

            def parseTests(s):
                # comma-separated module list -> list of names, empties dropped
                return [o.strip() for o in s.split(",") if o]
            logs, ret = suite.runTests(parseTests(args.tests), parseTests(args.blacklist), args.cwd, test_args)
    except Err as e:
        log.error("ERROR: test path '%s' ==> %s", path, e.msg)
        ret = -1

    if logs:
        log.warning("Collected: %s", logs)

    if ret != 0:
        log.error("ERROR: some tests have failed")
    exit(ret)
|
170
3rdparty/opencv-4.5.4/modules/ts/misc/run_android.py
vendored
Normal file
170
3rdparty/opencv-4.5.4/modules/ts/misc/run_android.py
vendored
Normal file
@ -0,0 +1,170 @@
|
||||
#!/usr/bin/env python
|
||||
import os
|
||||
import re
|
||||
import getpass
|
||||
from run_utils import Err, log, execute, isColorEnabled, hostos
|
||||
from run_suite import TestSuite
|
||||
|
||||
|
||||
def exe(program):
    """Append '.exe' to *program* on Windows hosts; unchanged elsewhere."""
    suffix = ".exe" if hostos == 'nt' else ""
    return program + suffix
|
||||
|
||||
|
||||
class ApkInfo:
    """Package metadata extracted from an Android APK manifest."""

    def __init__(self):
        self.pkg_name = None    # application package name
        self.pkg_target = None  # instrumentation target package
        self.pkg_runner = None  # instrumentation runner class

    def forcePackage(self, package):
        """Override the target package; a leading '.' appends a sub-package."""
        if not package:
            return
        if package.startswith("."):
            self.pkg_target += package
        else:
            self.pkg_target = package
|
||||
|
||||
|
||||
class Tool:
    """Base wrapper around an external command-line tool.

    Subclasses fill ``self.cmd`` with the executable and its fixed leading
    arguments; ``run`` appends per-call arguments and delegates to execute().
    """

    def __init__(self):
        # command prefix (executable + default arguments); set by subclasses
        self.cmd = []

    def run(self, args=None, silent=False):
        """Execute the tool with extra *args*.

        Returns execute()'s result: captured output when *silent*, else the
        exit code.

        Fixes: the original built a combined ``cmd`` list and then ignored
        it, passing ``self.cmd + args`` to execute() instead (dead local);
        it also used a mutable default argument (``args=[]``).
        """
        cmd = self.cmd + list(args or [])
        return execute(cmd, silent)
|
||||
|
||||
|
||||
class Adb(Tool):
    """Wrapper around the Android 'adb' tool from the SDK platform-tools."""

    def __init__(self, sdk_dir):
        Tool.__init__(self)
        exe_path = os.path.join(sdk_dir, exe("platform-tools/adb"))
        if not os.path.isfile(exe_path) or not os.access(exe_path, os.X_OK):
            exe_path = None
        # fix adb tool location
        if not exe_path:
            # fall back to an 'adb' resolvable through PATH
            exe_path = "adb"
        self.cmd = [exe_path]

    def init(self, serial):
        # remember current device serial. Needed if another device is connected while this script runs
        if not serial:
            serial = self.detectSerial()
        if serial:
            self.cmd.extend(["-s", serial])

    def detectSerial(self):
        """Return the serial of the single connected device; raise Err when
        zero or more than one device is attached."""
        adb_res = self.run(["devices"], silent=True)
        # assume here that device name may consists of any characters except newline
        connected_devices = re.findall(r"^[^\n]+[ \t]+device\r?$", adb_res, re.MULTILINE)
        if not connected_devices:
            raise Err("Can not find Android device")
        elif len(connected_devices) != 1:
            raise Err("Too many (%s) devices are connected. Please specify single device using --serial option:\n\n%s", len(connected_devices), adb_res)
        else:
            # 'adb devices' lines are '<serial>\tdevice'
            return connected_devices[0].split("\t")[0]

    def getOSIdentifier(self):
        # e.g. "Android11" -- release version queried from the device
        return "Android" + self.run(["shell", "getprop ro.build.version.release"], silent=True).strip()
|
||||
|
||||
|
||||
class Aapt(Tool):
    """Wrapper around the Android 'aapt' tool, located under build-tools/."""

    def __init__(self, sdk_dir):
        Tool.__init__(self)
        aapt_fn = exe("aapt")
        aapt = None
        # aapt lives in a versioned subdirectory of build-tools; walk to find it
        for r, ds, fs in os.walk(os.path.join(sdk_dir, 'build-tools')):
            if aapt_fn in fs:
                aapt = os.path.join(r, aapt_fn)
                break
        if not aapt:
            raise Err("Can not find aapt tool: %s", aapt_fn)
        self.cmd = [aapt]

    def dump(self, exe):
        """Extract package and instrumentation info from an APK manifest.

        Returns an ApkInfo; raises Err when the manifest cannot be dumped or
        required attributes are missing.
        NOTE(review): the parameter name 'exe' shadows the module-level exe()
        helper inside this method (harmless here, the helper is not used).
        """
        res = ApkInfo()
        output = self.run(["dump", "xmltree", exe, "AndroidManifest.xml"], silent=True)
        if not output:
            raise Err("Can not dump manifest from %s", exe)
        # split aapt's xmltree output into element chunks
        tags = re.split(r"[ ]+E: ", output)
        # get package name
        manifest_tag = [t for t in tags if t.startswith("manifest ")]
        if not manifest_tag:
            raise Err("Can not read package name from: %s", exe)
        res.pkg_name = re.search(r"^[ ]+A: package=\"(?P<pkg>.*?)\" \(Raw: \"(?P=pkg)\"\)\r?$", manifest_tag[0], flags=re.MULTILINE).group("pkg")
        # get test instrumentation info
        instrumentation_tag = [t for t in tags if t.startswith("instrumentation ")]
        if not instrumentation_tag:
            raise Err("Can not find instrumentation details in: %s", exe)
        res.pkg_runner = re.search(r"^[ ]+A: android:name\(0x[0-9a-f]{8}\)=\"(?P<runner>.*?)\" \(Raw: \"(?P=runner)\"\)\r?$", instrumentation_tag[0], flags=re.MULTILINE).group("runner")
        res.pkg_target = re.search(r"^[ ]+A: android:targetPackage\(0x[0-9a-f]{8}\)=\"(?P<pkg>.*?)\" \(Raw: \"(?P=pkg)\"\)\r?$", instrumentation_tag[0], flags=re.MULTILINE).group("pkg")
        if not res.pkg_name or not res.pkg_runner or not res.pkg_target:
            raise Err("Can not find instrumentation details in: %s", exe)
        return res
|
||||
|
||||
|
||||
class AndroidTestSuite(TestSuite):
    """TestSuite specialization that runs tests on an Android device via adb.

    APK packages are (re)installed and started through 'am instrument';
    native executables are pushed to a per-user folder under /data/local/tmp
    and executed in an adb shell.
    """

    def __init__(self, options, cache, id, android_env={}):
        # NOTE(review): mutable default argument ({}); harmless here because
        # the dict is only read, but callers should pass their own mapping.
        TestSuite.__init__(self, options, cache, id)
        # SDK location: explicit option, ANDROID_SDK env var, or derived from
        # the cached path to the 'android' executable
        sdk_dir = options.android_sdk or os.environ.get("ANDROID_SDK", False) or os.path.dirname(os.path.dirname(self.cache.android_executable))
        log.debug("Detecting Android tools in directory: %s", sdk_dir)
        self.adb = Adb(sdk_dir)
        self.aapt = Aapt(sdk_dir)
        # extra environment exported on the device before running a native test
        self.env = android_env

    def isTest(self, fullpath):
        # a test is an .apk package or any executable file
        if os.path.isfile(fullpath):
            if fullpath.endswith(".apk") or os.access(fullpath, os.X_OK):
                return True
        return False

    def getOS(self):
        """OS identifier queried from the connected device."""
        return self.adb.getOSIdentifier()

    def checkPrerequisites(self):
        # bind adb to the requested (or the single detected) device
        self.adb.init(self.options.serial)

    def runTest(self, module, path, logfile, workingDir, args=[]):
        """Run one test on the device; returns (host log path or None, exit code)."""
        args = args[:]
        exe = os.path.abspath(path)

        if exe.endswith(".apk"):
            # instrumentation test: reinstall the package, run 'am instrument'
            info = self.aapt.dump(exe)
            if not info:
                raise Err("Can not read info from test package: %s", exe)
            info.forcePackage(self.options.package)
            self.adb.run(["uninstall", info.pkg_name])

            output = self.adb.run(["install", exe], silent=True)
            if not (output and "Success" in output):
                raise Err("Can not install package: %s", exe)

            params = ["-e package %s" % info.pkg_target]
            ret = self.adb.run(["shell", "am instrument -w %s %s/%s" % (" ".join(params), info.pkg_name, info.pkg_runner)])
            return None, ret
        else:
            # native test: push the binary, run it in a shell, pull the log back
            device_dir = getpass.getuser().replace(" ", "") + "_" + self.options.mode + "/"
            if isColorEnabled(args):
                args.append("--gtest_color=yes")
            tempdir = "/data/local/tmp/"
            android_dir = tempdir + device_dir
            exename = os.path.basename(exe)
            android_exe = android_dir + exename
            self.adb.run(["push", exe, android_exe])
            self.adb.run(["shell", "chmod 777 " + android_exe])
            # export self.env entries, then cd and execute in one shell command
            env_pieces = ["export %s=%s" % (a, b) for a, b in self.env.items()]
            pieces = ["cd %s" % android_dir, "./%s %s" % (exename, " ".join(args))]
            log.warning("Run: %s" % " && ".join(pieces))
            ret = self.adb.run(["shell", " && ".join(env_pieces + pieces)])
            # try get log
            hostlogpath = os.path.join(workingDir, logfile)
            self.adb.run(["pull", android_dir + logfile, hostlogpath])
            # cleanup
            self.adb.run(["shell", "rm " + android_dir + logfile])
            self.adb.run(["shell", "rm " + tempdir + "__opencv_temp.*"], silent=True)
            if os.path.isfile(hostlogpath):
                return hostlogpath, ret
            return None, ret
|
||||
|
||||
|
||||
# Guard against direct invocation: this module only provides helpers for run.py.
if __name__ == "__main__":
    log.error("This is utility file, please execute run.py script")
|
119
3rdparty/opencv-4.5.4/modules/ts/misc/run_long.py
vendored
Normal file
119
3rdparty/opencv-4.5.4/modules/ts/misc/run_long.py
vendored
Normal file
@ -0,0 +1,119 @@
|
||||
#!/usr/bin/env python
|
||||
from __future__ import print_function
|
||||
import xml.etree.ElementTree as ET
|
||||
from glob import glob
|
||||
from pprint import PrettyPrinter as PP
|
||||
|
||||
# Tests known to run too long under valgrind: (module, gtest pattern, seconds).
# Used by longTestFilter() to build a --gtest_filter exclusion list.
# Fix: the original contained the '*JointBilateralFilterTest_NaiveRef*'
# ximgproc entry twice; the exact duplicate is removed (the generated filter
# string is otherwise unchanged in effect).
LONG_TESTS_DEBUG_VALGRIND = [
    ('calib3d', 'Calib3d_InitUndistortRectifyMap.accuracy', 2017.22),
    ('dnn', 'Reproducibility*', 1000),  # large DNN models
    ('dnn', '*RCNN*', 1000),  # very large DNN models
    ('dnn', '*RFCN*', 1000),  # very large DNN models
    ('dnn', '*EAST*', 1000),  # very large DNN models
    ('dnn', '*VGG16*', 1000),  # very large DNN models
    ('dnn', '*ZFNet*', 1000),  # very large DNN models
    ('dnn', '*ResNet101_DUC_HDC*', 1000),  # very large DNN models
    ('dnn', '*LResNet100E_IR*', 1000),  # very large DNN models
    ('dnn', '*read_yolo_voc_stream*', 1000),  # very large DNN models
    ('dnn', '*eccv16*', 1000),  # very large DNN models
    ('dnn', '*OpenPose*', 1000),  # very large DNN models
    ('dnn', '*SSD/*', 1000),  # very large DNN models
    ('gapi', 'Fluid.MemoryConsumptionDoesNotGrowOnReshape', 1000000),  # test doesn't work properly under valgrind
    ('face', 'CV_Face_FacemarkLBF.test_workflow', 10000.0),  # >40min on i7
    ('features2d', 'Features2d/DescriptorImage.no_crash/3', 1000),
    ('features2d', 'Features2d/DescriptorImage.no_crash/4', 1000),
    ('features2d', 'Features2d/DescriptorImage.no_crash/5', 1000),
    ('features2d', 'Features2d/DescriptorImage.no_crash/6', 1000),
    ('features2d', 'Features2d/DescriptorImage.no_crash/7', 1000),
    ('imgcodecs', 'Imgcodecs_Png.write_big', 1000),  # memory limit
    ('imgcodecs', 'Imgcodecs_Tiff.decode_tile16384x16384', 1000),  # memory limit
    ('ml', 'ML_RTrees.regression', 1423.47),
    ('optflow', 'DenseOpticalFlow_DeepFlow.ReferenceAccuracy', 1360.95),
    ('optflow', 'DenseOpticalFlow_DeepFlow_perf.perf/0', 1881.59),
    ('optflow', 'DenseOpticalFlow_DeepFlow_perf.perf/1', 5608.75),
    ('optflow', 'DenseOpticalFlow_GlobalPatchColliderDCT.ReferenceAccuracy', 5433.84),
    ('optflow', 'DenseOpticalFlow_GlobalPatchColliderWHT.ReferenceAccuracy', 5232.73),
    ('optflow', 'DenseOpticalFlow_SimpleFlow.ReferenceAccuracy', 1542.1),
    ('photo', 'Photo_Denoising.speed', 1484.87),
    ('photo', 'Photo_DenoisingColoredMulti.regression', 2447.11),
    ('rgbd', 'Rgbd_Normals.compute', 1156.32),
    ('shape', 'Hauss.regression', 2625.72),
    ('shape', 'ShapeEMD_SCD.regression', 61913.7),
    ('shape', 'Shape_SCD.regression', 3311.46),
    ('tracking', 'AUKF.br_mean_squared_error', 10764.6),
    ('tracking', 'UKF.br_mean_squared_error', 5228.27),
    ('tracking', '*DistanceAndOverlap*/1', 1000.0),  # dudek
    ('tracking', '*DistanceAndOverlap*/2', 1000.0),  # faceocc2
    ('videoio', 'videoio/videoio_ffmpeg.write_big*', 1000),
    ('videoio', 'videoio_ffmpeg.parallel', 1000),
    ('videoio', '*videocapture_acceleration*', 1000),  # valgrind can't track HW buffers: Conditional jump or move depends on uninitialised value(s)
    ('videoio', '*videowriter_acceleration*', 1000),  # valgrind crash: set_mempolicy: Operation not permitted
    ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_BoostDesc_LBGM.regression', 1124.51),
    ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG120.regression', 2198.1),
    ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG48.regression', 1958.52),
    ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG64.regression', 2113.12),
    ('xfeatures2d', 'Features2d_RotationInvariance_Descriptor_VGG80.regression', 2167.16),
    ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_BoostDesc_LBGM.regression', 1511.39),
    ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG120.regression', 1222.07),
    ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG48.regression', 1059.14),
    ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG64.regression', 1163.41),
    ('xfeatures2d', 'Features2d_ScaleInvariance_Descriptor_VGG80.regression', 1179.06),
    ('ximgproc', 'L0SmoothTest.SplatSurfaceAccuracy', 6382.26),
    ('ximgproc', 'perf*/1*:perf*/2*:perf*/3*:perf*/4*:perf*/5*:perf*/6*:perf*/7*:perf*/8*:perf*/9*', 1000.0),  # only first 10 parameters
    ('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.MultiThreadReproducibility/5', 1086.33),
    ('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.MultiThreadReproducibility/7', 1405.05),
    ('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.SplatSurfaceAccuracy/5', 1253.07),
    ('ximgproc', 'TypicalSet1/RollingGuidanceFilterTest.SplatSurfaceAccuracy/7', 1599.98),
    ('ximgproc', '*MultiThreadReproducibility*/1:*MultiThreadReproducibility*/2:*MultiThreadReproducibility*/3:*MultiThreadReproducibility*/4:*MultiThreadReproducibility*/5:*MultiThreadReproducibility*/6:*MultiThreadReproducibility*/7:*MultiThreadReproducibility*/8:*MultiThreadReproducibility*/9:*MultiThreadReproducibility*/1*', 1000.0),
    ('ximgproc', '*AdaptiveManifoldRefImplTest*/1:*AdaptiveManifoldRefImplTest*/2:*AdaptiveManifoldRefImplTest*/3', 1000.0),
    ('ximgproc', '*JointBilateralFilterTest_NaiveRef*', 1000.0),
    ('ximgproc', '*RollingGuidanceFilterTest_BilateralRef*/1*:*RollingGuidanceFilterTest_BilateralRef*/2*:*RollingGuidanceFilterTest_BilateralRef*/3*', 1000.0),
]
|
||||
|
||||
|
||||
def longTestFilter(data, module=None):
    """Build a ``--gtest_filter=...`` argument that excludes long tests.

    *data* is a list of (module, pattern, time) tuples; when *module* is
    given, only that module's patterns are excluded.
    """
    excluded = [pattern for mod, pattern, _time in data
                if module is None or mod == module]
    return '--gtest_filter=' + ':'.join(['*', '-'] + excluded)
|
||||
|
||||
|
||||
# Parse one xml file, filter out tests which took less than 'timeLimit' seconds
# Returns tuple: ( <module_name>, [ (<module_name>, <test_name>, <test_time>), ... ] )
def parseOneFile(filename, timeLimit):
    """Extract slow testcases from one gtest xml report.

    The module name is guessed from the file name (part before '_posix_',
    '_nt_' or '__'), falling back to the root's 'cv_module_name' attribute.
    Returns (None, None) when the module cannot be determined.
    """
    root = ET.parse(filename).getroot()

    def first_before(text, delims):
        # part of *text* before the first matching delimiter, else None
        for delim in delims:
            head, sep, _tail = text.partition(delim)
            if sep:
                return head
        return None

    module = first_before(filename, ['_posix_', '_nt_', '__']) or root.get('cv_module_name')
    if not module:
        return (None, None)
    slow = []
    for case in root.findall('.//testcase'):
        seconds = float(case.get('time'))
        if seconds >= timeLimit:
            name = '{}.{}'.format(case.get('classname'), case.get('name'))
            slow.append((module, name, seconds))
    return (module, slow)
|
||||
|
||||
|
||||
# Parse all xml files in current folder and combine results into one list
# Print result to the stdout
if __name__ == '__main__':
    LIMIT = 1000  # minimum test duration (seconds) to be reported
    res = []
    xmls = glob('*.xml')
    for xml in xmls:
        print('Parsing file', xml, '...')
        module, testinfo = parseOneFile(xml, LIMIT)
        if not module:
            # module name could not be determined -- skip this report
            print('SKIP')
            continue
        res.extend(testinfo)

    print('========= RESULTS =========')
    PP(indent=4, width=100).pprint(sorted(res))
|
206
3rdparty/opencv-4.5.4/modules/ts/misc/run_suite.py
vendored
Normal file
206
3rdparty/opencv-4.5.4/modules/ts/misc/run_suite.py
vendored
Normal file
@ -0,0 +1,206 @@
|
||||
#!/usr/bin/env python
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from run_utils import Err, log, execute, getPlatformVersion, isColorEnabled, TempEnvDir
|
||||
from run_long import LONG_TESTS_DEBUG_VALGRIND, longTestFilter
|
||||
|
||||
|
||||
class TestSuite(object):
    """Discovers and runs OpenCV accuracy/performance test executables.

    options -- parsed command-line namespace (see run.py)
    cache   -- CMakeCache describing the build (or a dummy for a plain folder)
    id      -- unique run identifier (timestamp) embedded in log file names

    Fix: runTest() referenced self.getLogBaseName() (used to build
    OPENCV_TRACE_LOCATION when --trace is enabled) but no such method was
    defined, raising AttributeError.  The helper is restored and
    getLogName() now derives from it; generated names are unchanged.
    """

    def __init__(self, options, cache, id):
        self.options = options
        self.cache = cache
        # test binaries are named like 'opencv_perf_core' / 'opencv_test_core'
        self.nameprefix = "opencv_" + self.options.mode + "_"
        self.tests = self.cache.gatherTests(self.nameprefix + "*", self.isTest)
        self.id = id

    def getOS(self):
        """Host platform version string, falling back to the cached target OS."""
        return getPlatformVersion() or self.cache.getOS()

    def getLogBaseName(self, app):
        """Per-run log name for *app*, without file extension."""
        return self.getAlias(app) + '_' + str(self.id)

    def getLogName(self, app):
        """xml log file name for a test executable."""
        return self.getLogBaseName(app) + '.xml'

    def listTests(self, short=False, main=False):
        """Print discovered tests; *short* prints aliases, *main* restricts
        output to main-repository modules."""
        if len(self.tests) == 0:
            raise Err("No tests found")
        for t in self.tests:
            if short:
                t = self.getAlias(t)
            if not main or self.cache.isMainModule(t):
                log.info("%s", t)

    def getAlias(self, fname):
        # the shortest alias is the most human-friendly one (e.g. 'core')
        return sorted(self.getAliases(fname), key=len)[0]

    def getAliases(self, fname):
        """Return the set of names a test can be referred to by."""
        def getCuts(fname, prefix):
            # filename w/o extension (opencv_test_core)
            noext = re.sub(r"\.(exe|apk)$", '', fname)
            # filename w/o prefix (core.exe)
            nopref = fname
            if fname.startswith(prefix):
                nopref = fname[len(prefix):]
            # filename w/o prefix and extension (core)
            noprefext = noext
            if noext.startswith(prefix):
                noprefext = noext[len(prefix):]
            return noext, nopref, noprefext
        # input is full path ('/home/.../bin/opencv_test_core') or 'java'
        res = [fname]
        fname = os.path.basename(fname)
        res.append(fname)  # filename (opencv_test_core.exe)
        for s in getCuts(fname, self.nameprefix):
            res.append(s)
            if self.cache.build_type == "Debug" and "Visual Studio" in self.cache.cmake_generator:
                res.append(re.sub(r"d$", '', s))  # MSVC debug config, remove 'd' suffix
        log.debug("Aliases: %s", set(res))
        return set(res)

    def getTest(self, name):
        # return stored test name by provided alias
        for t in self.tests:
            if name in self.getAliases(t):
                return t
        raise Err("Can not find test: %s", name)

    def getTestList(self, white, black):
        """Resolve the requested tests (*white*, or all) minus the blacklist."""
        res = [t for t in white or self.tests if self.getAlias(t) not in black]
        if len(res) == 0:
            raise Err("No tests found")
        return set(res)

    def isTest(self, fullpath):
        """True when *fullpath* looks like a runnable test."""
        if fullpath in ['java', 'python2', 'python3']:
            # pseudo-tests exist only in accuracy ('test') mode
            return self.options.mode == 'test'
        if not os.path.isfile(fullpath):
            return False
        if self.cache.getOS() == "nt" and not fullpath.endswith(".exe"):
            return False
        return os.access(fullpath, os.X_OK)

    def wrapCommand(self, module, cmd, env):
        """Wrap *cmd* for valgrind or qemu when requested; else return it as-is."""
        if self.options.valgrind:
            res = ['valgrind']
            supp = self.options.valgrind_supp or []
            for f in supp:
                if os.path.isfile(f):
                    res.append("--suppressions=%s" % f)
                else:
                    print("WARNING: Valgrind suppression file is missing, SKIP: %s" % f)
            res.extend(self.options.valgrind_opt)
            has_gtest_filter = next((True for x in cmd if x.startswith('--gtest_filter=')), False)
            # skip known-long tests unless the user already filters explicitly
            return res + cmd + ([longTestFilter(LONG_TESTS_DEBUG_VALGRIND, module)] if not has_gtest_filter else [])
        elif self.options.qemu:
            import shlex
            res = shlex.split(self.options.qemu)
            # forward host OPENCV* variables plus the test-specific env to qemu
            for (name, value) in [entry for entry in os.environ.items() if entry[0].startswith('OPENCV') and not entry[0] in env]:
                res += ['-E', '"{}={}"'.format(name, value)]
            for (name, value) in env.items():
                res += ['-E', '"{}={}"'.format(name, value)]
            return res + ['--'] + cmd
        return cmd

    def tryCommand(self, cmd, workingDir):
        """Best-effort execution probe: True when *cmd* exits with code 0."""
        try:
            if 0 == execute(cmd, cwd=workingDir):
                return True
        except:
            pass
        return False

    def runTest(self, module, path, logfile, workingDir, args=[]):
        """Run one test; returns (log file path or None, exit code)."""
        args = args[:]
        exe = os.path.abspath(path)
        if module == "java":
            # Java tests are built and executed through ant
            cmd = [self.cache.ant_executable, "-Dopencv.build.type=%s" % self.cache.build_type]
            if self.options.package:
                cmd += ["-Dopencv.test.package=%s" % self.options.package]
            if self.options.java_test_exclude:
                cmd += ["-Dopencv.test.exclude=%s" % self.options.java_test_exclude]
            cmd += ["buildAndTest"]
            ret = execute(cmd, cwd=self.cache.java_test_dir)
            return None, ret
        elif module in ['python2', 'python3']:
            # choose an interpreter: explicit override, this interpreter,
            # the recorded path, or a plain 'python' from PATH
            executable = os.getenv('OPENCV_PYTHON_BINARY', None)
            if executable is None or module == 'python{}'.format(sys.version_info[0]):
                executable = sys.executable
            if executable is None:
                executable = path
                if not self.tryCommand([executable, '--version'], workingDir):
                    executable = 'python'
            cmd = [executable, self.cache.opencv_home + '/modules/python/test/test.py', '--repo', self.cache.opencv_home, '-v'] + args
            module_suffix = '' if 'Visual Studio' not in self.cache.cmake_generator else '/' + self.cache.build_type
            env = {}
            env['PYTHONPATH'] = self.cache.opencv_build + '/lib' + module_suffix + os.pathsep + os.getenv('PYTHONPATH', '')
            if self.cache.getOS() == 'nt':
                env['PATH'] = self.cache.opencv_build + '/bin' + module_suffix + os.pathsep + os.getenv('PATH', '')
            else:
                env['LD_LIBRARY_PATH'] = self.cache.opencv_build + '/bin' + os.pathsep + os.getenv('LD_LIBRARY_PATH', '')
            ret = execute(cmd, cwd=workingDir, env=env)
            return None, ret
        else:
            # native gtest executable
            if isColorEnabled(args):
                args.append("--gtest_color=yes")
            env = {}
            if not self.options.valgrind and self.options.trace:
                env['OPENCV_TRACE'] = '1'
                env['OPENCV_TRACE_LOCATION'] = 'OpenCVTrace-{}'.format(self.getLogBaseName(exe))
                env['OPENCV_TRACE_SYNC_OPENCL'] = '1'
            tempDir = TempEnvDir('OPENCV_TEMP_PATH', "__opencv_temp.")
            tempDir.init()
            cmd = self.wrapCommand(module, [exe] + args, env)
            log.warning("Run: %s" % " ".join(cmd))
            ret = execute(cmd, cwd=workingDir, env=env)
            try:
                # optionally post-process the collected trace
                if not self.options.valgrind and self.options.trace and int(self.options.trace_dump) >= 0:
                    import trace_profiler
                    trace = trace_profiler.Trace(env['OPENCV_TRACE_LOCATION'] + '.txt')
                    trace.process()
                    trace.dump(max_entries=int(self.options.trace_dump))
            except:
                # trace dumping is best-effort; never fail the test run for it
                import traceback
                traceback.print_exc()
            tempDir.clean()
            hostlogpath = os.path.join(workingDir, logfile)
            if os.path.isfile(hostlogpath):
                return hostlogpath, ret
            return None, ret

    def runTests(self, tests, black, workingDir, args=[]):
        """Run all requested tests; returns (log files, last non-zero exit code)."""
        args = args[:]
        logs = []
        test_list = self.getTestList(tests, black)
        if len(test_list) != 1:
            # a single shared --gtest_output file would be overwritten per test
            args = [a for a in args if not a.startswith("--gtest_output=")]
        ret = 0
        for test in test_list:
            more_args = []
            exe = self.getTest(test)

            if exe in ["java", "python2", "python3"]:
                logname = None
            else:
                userlog = [a for a in args if a.startswith("--gtest_output=")]
                if len(userlog) == 0:
                    logname = self.getLogName(exe)
                    more_args.append("--gtest_output=xml:" + logname)
                else:
                    logname = userlog[0][userlog[0].find(":") + 1:]

            log.debug("Running the test: %s (%s) ==> %s in %s", exe, args + more_args, logname, workingDir)
            if self.options.dry_run:
                logfile, r = None, 0
            else:
                logfile, r = self.runTest(test, exe, logname, workingDir, args + more_args)
            log.debug("Test returned: %s ==> %s", r, logfile)

            if r != 0:
                ret = r
            if logfile:
                logs.append(os.path.relpath(logfile, workingDir))
        return logs, ret
|
||||
|
||||
|
||||
# Guard against direct invocation: this module only provides helpers for run.py.
if __name__ == "__main__":
    log.error("This is utility file, please execute run.py script")
|
203
3rdparty/opencv-4.5.4/modules/ts/misc/run_utils.py
vendored
Normal file
203
3rdparty/opencv-4.5.4/modules/ts/misc/run_utils.py
vendored
Normal file
@ -0,0 +1,203 @@
|
||||
#!/usr/bin/env python
|
||||
import sys
|
||||
import os
|
||||
import platform
|
||||
import re
|
||||
import tempfile
|
||||
import glob
|
||||
import logging
|
||||
import shutil
|
||||
from subprocess import check_call, check_output, CalledProcessError, STDOUT
|
||||
|
||||
|
||||
def initLogger():
    """Create the shared 'run.py' logger: DEBUG level, plain messages to stderr."""
    logger = logging.getLogger("run.py")
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter("%(message)s"))
    logger.addHandler(handler)
    return logger
|
||||
|
||||
|
||||
# Shared module logger and host OS identifier, imported by the other scripts.
log = initLogger()
hostos = os.name # 'nt', 'posix'
|
||||
|
||||
|
||||
class Err(Exception):
    """Runner error carrying a printf-style formatted message.

    The formatted text is exposed as ``msg`` (the attribute existing callers
    read).  Fix: the original never called Exception.__init__, so str(e) was
    always empty; the message is now passed to the base class as well.
    """

    def __init__(self, msg, *args):
        self.msg = msg % args
        super(Err, self).__init__(self.msg)
|
||||
|
||||
|
||||
def execute(cmd, silent=False, cwd=".", env=None):
    """Run *cmd* as a subprocess.

    silent=True captures and returns combined stdout+stderr (latin-1
    decoded); otherwise the exit code is returned.  *env* entries are
    overlaid on a copy of os.environ.  On a CalledProcessError the captured
    output (silent) or the return code is returned instead of raising.
    """
    try:
        log.debug("Run: %s", cmd)
        if env is not None:
            for k in env:
                log.debug(" Environ: %s=%s", k, env[k])
            # overlay the caller's entries on the inherited environment
            new_env = os.environ.copy()
            new_env.update(env)
            env = new_env

        if sys.platform == 'darwin':  # https://github.com/opencv/opencv/issues/14351
            # preserve DYLD_LIBRARY_PATH, which macOS strips for child processes
            if env is None:
                env = os.environ.copy()
            if 'DYLD_LIBRARY_PATH' in env:
                env['OPENCV_SAVED_DYLD_LIBRARY_PATH'] = env['DYLD_LIBRARY_PATH']

        if silent:
            return check_output(cmd, stderr=STDOUT, cwd=cwd, env=env).decode("latin-1")
        else:
            return check_call(cmd, cwd=cwd, env=env)
    except CalledProcessError as e:
        # non-zero exit: report it, but hand the result back to the caller
        if silent:
            log.debug("Process returned: %d", e.returncode)
            return e.output.decode("latin-1")
        else:
            log.error("Process returned: %d", e.returncode)
            return e.returncode
|
||||
|
||||
|
||||
def isColorEnabled(args):
    """Return True when gtest color output should be enabled implicitly.

    Color is used only when the caller did not pass an explicit
    ``--gtest_color=`` option, stdout is a terminal, and we are not on
    Windows (console escape codes unsupported by plain cmd.exe).
    """
    explicit = any(a.startswith("--gtest_color=") for a in args)
    return (not explicit) and sys.stdout.isatty() and hostos != "nt"
|
||||
|
||||
|
||||
def getPlatformVersion():
    """Return a human-readable OS/version tag, or None when undetermined.

    Tries macOS first ("Darwin<ver>"), then Windows ("Windows<ver>"),
    then a Linux distribution name+version.

    Fix: ``platform.linux_distribution()`` was removed in Python 3.8, so
    the original code raised AttributeError on modern Linux interpreters;
    we now degrade gracefully to None when the API is unavailable.
    """
    mv = platform.mac_ver()
    if mv[0]:
        return "Darwin" + mv[0]
    wv = platform.win32_ver()
    if wv[0]:
        return "Windows" + wv[0]
    # Removed in Python 3.8 -- probe for it instead of calling blindly.
    linux_distribution = getattr(platform, "linux_distribution", None)
    if linux_distribution is not None:
        lv = linux_distribution()
        if lv[0]:
            return lv[0] + lv[1]
    return None
|
||||
|
||||
|
||||
# Patterns used to harvest configuration values from CMakeCache.txt.
# Each entry: 'name' = attribute set on CMakeCache, 'default' = value when
# the cache lacks the entry, 'pattern' = regex whose first capture group
# supplies the value.
parse_patterns = (
    {'name': "cmake_home",         'default': None,      'pattern': re.compile(r"^CMAKE_HOME_DIRECTORY:\w+=(.+)$")},
    {'name': "opencv_home",        'default': None,      'pattern': re.compile(r"^OpenCV_SOURCE_DIR:\w+=(.+)$")},
    {'name': "opencv_build",       'default': None,      'pattern': re.compile(r"^OpenCV_BINARY_DIR:\w+=(.+)$")},
    {'name': "tests_dir",          'default': None,      'pattern': re.compile(r"^EXECUTABLE_OUTPUT_PATH:\w+=(.+)$")},
    {'name': "build_type",         'default': "Release", 'pattern': re.compile(r"^CMAKE_BUILD_TYPE:\w+=(.*)$")},
    {'name': "android_abi",        'default': None,      'pattern': re.compile(r"^ANDROID_ABI:\w+=(.*)$")},
    {'name': "android_executable", 'default': None,      'pattern': re.compile(r"^ANDROID_EXECUTABLE:\w+=(.*android.*)$")},
    {'name': "ant_executable",     'default': None,      'pattern': re.compile(r"^ANT_EXECUTABLE:\w+=(.*ant.*)$")},
    {'name': "java_test_dir",      'default': None,      'pattern': re.compile(r"^OPENCV_JAVA_TEST_DIR:\w+=(.*)$")},
    {'name': "is_x64",             'default': "OFF",     'pattern': re.compile(r"^CUDA_64_BIT_DEVICE_CODE:\w+=(ON)$")},
    {'name': "cmake_generator",    'default': None,      'pattern': re.compile(r"^CMAKE_GENERATOR:\w+=(.+)$")},
    {'name': "python2",            'default': None,      'pattern': re.compile(r"^BUILD_opencv_python2:\w+=(.*)$")},
    {'name': "python3",            'default': None,      'pattern': re.compile(r"^BUILD_opencv_python3:\w+=(.*)$")},
)
|
||||
|
||||
|
||||
class CMakeCache:
    """Lightweight view of an OpenCV build's CMakeCache.txt.

    Attributes named in ``parse_patterns`` (cmake_home, opencv_home,
    tests_dir, build_type, ...) are populated from the cache file;
    ``main_modules`` lists the opencv modules located inside the main
    source tree.
    """

    def __init__(self, cfg=None):
        self.setDefaultAttrs()
        self.main_modules = []
        # cfg overrides the build configuration (e.g. "Debug"/"Release").
        if cfg:
            self.build_type = cfg

    def setDummy(self, path):
        """Point at a prebuilt test-binaries directory without a cache file."""
        self.tests_dir = os.path.normpath(path)

    def read(self, path, fname):
        """Parse cache file *fname*; *path* is the build dir it belongs to."""
        rx = re.compile(r'^OPENCV_MODULE_opencv_(\w+)_LOCATION:INTERNAL=(.*)$')
        module_paths = {}  # name -> path
        with open(fname, "rt") as cachefile:
            for l in cachefile.readlines():
                ll = l.strip()
                if not ll or ll.startswith("#"):
                    continue
                for p in parse_patterns:
                    match = p["pattern"].match(ll)
                    if match:
                        value = match.groups()[0]
                        if value and not value.endswith("-NOTFOUND"):
                            setattr(self, p["name"], value)
                            # log.debug("cache value: %s = %s", p["name"], value)

                match = rx.search(ll)
                if match:
                    module_paths[match.group(1)] = match.group(2)

        if not self.tests_dir:
            self.tests_dir = path
        else:
            # Re-anchor tests_dir in case the build tree was moved/copied.
            rel = os.path.relpath(self.tests_dir, self.opencv_build)
            self.tests_dir = os.path.join(path, rel)
        self.tests_dir = os.path.normpath(self.tests_dir)

        # fix VS test binary path (add Debug or Release)
        if "Visual Studio" in self.cmake_generator:
            self.tests_dir = os.path.join(self.tests_dir, self.build_type)

        for module, path in module_paths.items():
            rel = os.path.relpath(path, self.opencv_home)
            if ".." not in rel:
                self.main_modules.append(module)

    def setDefaultAttrs(self):
        """Reset every parse_patterns attribute to its declared default."""
        for p in parse_patterns:
            setattr(self, p["name"], p["default"])

    def gatherTests(self, mask, isGood=None):
        """Return test binaries in tests_dir matching *mask* plus the java /
        python pseudo-tests when enabled, filtered by predicate *isGood*.

        Fix: the isGood=None default used to be called unconditionally and
        raised TypeError; None now means "accept everything".
        """
        if self.tests_dir and os.path.isdir(self.tests_dir):
            d = os.path.abspath(self.tests_dir)
            files = glob.glob(os.path.join(d, mask))
            if not self.getOS() == "android" and self.withJava():
                files.append("java")
            if self.withPython2():
                files.append("python2")
            if self.withPython3():
                files.append("python3")
            if isGood is None:
                return files
            return [f for f in files if isGood(f)]
        return []

    def isMainModule(self, name):
        """True for modules in the main opencv tree (python ones always)."""
        return name in self.main_modules + ['python2', 'python3']

    def withJava(self):
        """Java tests are possible when ant and the java test dir exist."""
        return self.ant_executable and self.java_test_dir and os.path.exists(self.java_test_dir)

    def withPython2(self):
        return self.python2 == 'ON'

    def withPython3(self):
        return self.python3 == 'ON'

    def getOS(self):
        """'android' when the cache found android tooling, else the host OS."""
        if self.android_executable:
            return "android"
        else:
            return hostos
|
||||
|
||||
|
||||
class TempEnvDir:
    """Temporarily point an environment variable at a fresh temp directory.

    init() creates the directory and sets os.environ[envname]; clean()
    restores the previous value (or unsets it) and removes the directory.
    """

    def __init__(self, envname, prefix):
        self.envname = envname
        self.prefix = prefix
        self.saved_name = None   # previous value of the variable, if any
        self.new_name = None     # path of the created temp directory

    def init(self):
        """Create the temp dir and redirect os.environ[envname] to it."""
        self.saved_name = os.environ.get(self.envname)
        # Nest inside the previous location when one was set.
        self.new_name = tempfile.mkdtemp(prefix=self.prefix, dir=self.saved_name or None)
        os.environ[self.envname] = self.new_name

    def clean(self):
        """Restore the previous variable value and remove the temp dir."""
        if self.saved_name:
            os.environ[self.envname] = self.saved_name
        else:
            del os.environ[self.envname]
        try:
            shutil.rmtree(self.new_name)
        except OSError:
            # Best effort: the dir may already be gone or still busy (Windows).
            # Fix: was a bare `except:` that also swallowed KeyboardInterrupt.
            pass
|
||||
|
||||
|
||||
# run_utils is a library for run.py; refuse direct execution.
if __name__ == "__main__":
    log.error("This is utility file, please execute run.py script")
|
296
3rdparty/opencv-4.5.4/modules/ts/misc/summary.py
vendored
Executable file
296
3rdparty/opencv-4.5.4/modules/ts/misc/summary.py
vendored
Executable file
@ -0,0 +1,296 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
import testlog_parser, sys, os, xml, glob, re
|
||||
from table_formatter import *
|
||||
from optparse import OptionParser
|
||||
|
||||
# Natural-sort helpers: cv type tokens such as "8UC3" are replaced by a
# numeric rank (depth + 8*(channels-1)) so test names order by type rather
# than lexicographically, and digit runs compare numerically.
numeric_re = re.compile("(\d+)")
cvtype_re = re.compile("(8U|8S|16U|16S|32S|32F|64F)C(\d{1,3})")
cvtypes = { '8U': 0, '8S': 1, '16U': 2, '16S': 3, '32S': 4, '32F': 5, '64F': 6 }

# convert: digit runs -> int so "10" sorts after "9".
convert = lambda text: int(text) if text.isdigit() else text
keyselector = lambda a: cvtype_re.sub(lambda match: " " + str(cvtypes.get(match.group(1), 7) + (int(match.group(2))-1) * 8) + " ", a)
alphanum_keyselector = lambda key: [ convert(c) for c in numeric_re.split(keyselector(key)) ]
|
||||
|
||||
def getSetName(tset, idx, columns, short = True):
    """Return the display name for test set *tset* (a (filename, tests) pair).

    A column alias from *columns* takes precedence; with short=False the
    alias is combined with the log-file-derived name, separated by a dashed
    line sized from the alias' longest line.
    """
    prefix = columns[idx] if columns and len(columns) > idx else None
    if short and prefix:
        return prefix
    name = tset[0].replace(".xml", "").replace("_", "\n")
    if not prefix:
        return name
    dashes = "-" * int(len(max(prefix.split("\n"), key=len)) * 1.5)
    return prefix + "\n" + dashes + "\n" + name
|
||||
|
||||
# Command-line entry point: merge several perf test logs into one comparison
# table (console/HTML/markdown), optionally with relative speedups, cycle
# reduction and scores.
# Fixes: (1) Python-2 `print >> sys.stderr` statement replaced by print(...,
# file=...) -- the old form raised TypeError at runtime under Python 3;
# (2) the calc_score "no data" cell was written to column suffix "$"
# (cycles-reduction) instead of "S" -- a copy-paste typo (both the red path
# and the value path use "S").
if __name__ == "__main__":
    if len(sys.argv) < 2:
        print("Usage:\n", os.path.basename(sys.argv[0]), "<log_name1>.xml [<log_name2>.xml ...]", file=sys.stderr)
        exit(0)

    parser = OptionParser()
    parser.add_option("-o", "--output", dest="format", help="output results in text format (can be 'txt', 'html', 'markdown', 'tabs' or 'auto' - default)", metavar="FMT", default="auto")
    parser.add_option("-m", "--metric", dest="metric", help="output metric", metavar="NAME", default="gmean")
    parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), us, ns or ticks)", metavar="UNITS", default="ms")
    parser.add_option("-f", "--filter", dest="filter", help="regex to filter tests", metavar="REGEX", default=None)
    parser.add_option("", "--module", dest="module", default=None, metavar="NAME", help="module prefix for test names")
    parser.add_option("", "--columns", dest="columns", default=None, metavar="NAMES", help="comma-separated list of column aliases")
    parser.add_option("", "--no-relatives", action="store_false", dest="calc_relatives", default=True, help="do not output relative values")
    parser.add_option("", "--with-cycles-reduction", action="store_true", dest="calc_cr", default=False, help="output cycle reduction percentages")
    parser.add_option("", "--with-score", action="store_true", dest="calc_score", default=False, help="output automatic classification of speedups")
    parser.add_option("", "--progress", action="store_true", dest="progress_mode", default=False, help="enable progress mode")
    parser.add_option("", "--regressions", dest="regressions", default=None, metavar="LIST", help="comma-separated custom regressions map: \"[r][c]#current-#reference\" (indexes of columns are 0-based, \"r\" - reverse flag, \"c\" - color flag for base data)")
    parser.add_option("", "--show-all", action="store_true", dest="showall", default=False, help="also include empty and \"notrun\" lines")
    parser.add_option("", "--match", dest="match", default=None)
    parser.add_option("", "--match-replace", dest="match_replace", default="")
    parser.add_option("", "--regressions-only", dest="regressionsOnly", default=None, metavar="X-FACTOR", help="show only tests with performance regressions not")
    parser.add_option("", "--intersect-logs", dest="intersect_logs", default=False, help="show only tests present in all log files")
    parser.add_option("", "--show_units", action="store_true", dest="show_units", help="append units into table cells")
    (options, args) = parser.parse_args()

    options.generateHtml = detectHtmlOutputType(options.format)
    if options.metric not in metrix_table:
        options.metric = "gmean"
    # Percent/cycle metrics are already relative -- derived columns make no sense.
    if options.metric.endswith("%") or options.metric.endswith("$"):
        options.calc_relatives = False
        options.calc_cr = False
    if options.columns:
        options.columns = [s.strip().replace("\\n", "\n") for s in options.columns.split(",")]

    if options.regressions:
        assert not options.progress_mode, 'unsupported mode'

        def parseRegressionColumn(s):
            """ Format: '[r][c]<uint>-<uint>' """
            reverse = s.startswith('r')
            if reverse:
                s = s[1:]
            addColor = s.startswith('c')
            if addColor:
                s = s[1:]
            parts = s.split('-', 1)
            link = (int(parts[0]), int(parts[1]), reverse, addColor)
            assert link[0] != link[1]
            return link

        options.regressions = [parseRegressionColumn(s) for s in options.regressions.split(',')]

    show_units = options.units if options.show_units else None

    # expand wildcards and filter duplicates
    files = []
    seen = set()
    for arg in args:
        if ("*" in arg) or ("?" in arg):
            flist = [os.path.abspath(f) for f in glob.glob(arg)]
            flist = sorted(flist, key= lambda text: str(text).replace("M", "_"))
            files.extend([ x for x in flist if x not in seen and not seen.add(x)])
        else:
            fname = os.path.abspath(arg)
            if fname not in seen and not seen.add(fname):
                files.append(fname)

    # read all passed files
    test_sets = []
    for arg in files:
        try:
            tests = testlog_parser.parseLogFile(arg)
            if options.filter:
                expr = re.compile(options.filter)
                tests = [t for t in tests if expr.search(str(t))]
            if options.match:
                tests = [t for t in tests if t.get("status") != "notrun"]
            if tests:
                test_sets.append((os.path.basename(arg), tests))
        except IOError as err:
            sys.stderr.write("IOError reading \"" + arg + "\" - " + str(err) + os.linesep)
        except xml.parsers.expat.ExpatError as err:
            sys.stderr.write("ExpatError reading \"" + arg + "\" - " + str(err) + os.linesep)

    if not test_sets:
        sys.stderr.write("Error: no test data found" + os.linesep)
        quit()

    setsCount = len(test_sets)

    if options.regressions is None:
        # Default: compare every log to the first one (or to the previous
        # log in progress mode).
        reference = -1 if options.progress_mode else 0
        options.regressions = [(i, reference, False, True) for i in range(1, len(test_sets))]

    for link in options.regressions:
        (i, ref, reverse, addColor) = link
        assert i >= 0 and i < setsCount
        assert ref < setsCount

    # find matches
    test_cases = {}

    name_extractor = lambda name: str(name)
    if options.match:
        reg = re.compile(options.match)
        name_extractor = lambda name: reg.sub(options.match_replace, str(name))

    for i in range(setsCount):
        for case in test_sets[i][1]:
            name = name_extractor(case)
            if options.module:
                name = options.module + "::" + name
            if name not in test_cases:
                test_cases[name] = [None] * setsCount
            test_cases[name][i] = case

    # build table
    getter = metrix_table[options.metric][1]
    getter_score = metrix_table["score"][1] if options.calc_score else None
    getter_p = metrix_table[options.metric + "%"][1] if options.calc_relatives else None
    getter_cr = metrix_table[options.metric + "$"][1] if options.calc_cr else None
    tbl = table('%s (%s)' % (metrix_table[options.metric][0], options.units), options.format)

    # header
    tbl.newColumn("name", "Name of Test", align = "left", cssclass = "col_name")
    for i in range(setsCount):
        tbl.newColumn(str(i), getSetName(test_sets[i], i, options.columns, False), align = "center")

    def addHeaderColumns(suffix, description, cssclass):
        # One derived column ("current vs reference") per regression link.
        for link in options.regressions:
            (i, ref, reverse, addColor) = link
            if reverse:
                i, ref = ref, i
            current_set = test_sets[i]
            current = getSetName(current_set, i, options.columns)
            if ref >= 0:
                reference_set = test_sets[ref]
                reference = getSetName(reference_set, ref, options.columns)
            else:
                reference = 'previous'
            tbl.newColumn(str(i) + '-' + str(ref) + suffix, '%s\nvs\n%s\n(%s)' % (current, reference, description), align='center', cssclass=cssclass)

    if options.calc_cr:
        addHeaderColumns(suffix='$', description='cycles reduction', cssclass='col_cr')
    if options.calc_relatives:
        addHeaderColumns(suffix='%', description='x-factor', cssclass='col_rel')
    if options.calc_score:
        addHeaderColumns(suffix='S', description='score', cssclass='col_name')

    # rows
    prevGroupName = None
    needNewRow = True
    lastRow = None
    for name in sorted(test_cases.keys(), key=alphanum_keyselector):
        cases = test_cases[name]
        if needNewRow:
            lastRow = tbl.newRow()
            if not options.showall:
                needNewRow = False
        tbl.newCell("name", name)

        groupName = next(c for c in cases if c).shortName()
        if groupName != prevGroupName:
            prop = lastRow.props.get("cssclass", "")
            if "firstingroup" not in prop:
                lastRow.props["cssclass"] = prop + " firstingroup"
            prevGroupName = groupName

        for i in range(setsCount):
            case = cases[i]
            if case is None:
                if options.intersect_logs:
                    needNewRow = False
                    break
                tbl.newCell(str(i), "-")
            else:
                status = case.get("status")
                if status != "run":
                    tbl.newCell(str(i), status, color="red")
                else:
                    val = getter(case, cases[0], options.units)
                    if val:
                        needNewRow = True
                    tbl.newCell(str(i), formatValue(val, options.metric, show_units), val)

        if needNewRow:
            for link in options.regressions:
                (i, reference, reverse, addColor) = link
                if reverse:
                    i, reference = reference, i
                tblCellID = str(i) + '-' + str(reference)
                case = cases[i]
                if case is None:
                    if options.calc_relatives:
                        tbl.newCell(tblCellID + "%", "-")
                    if options.calc_cr:
                        tbl.newCell(tblCellID + "$", "-")
                    if options.calc_score:
                        tbl.newCell(tblCellID + "S", "-")  # FIX: was "$" -- wrong column
                else:
                    status = case.get("status")
                    if status != "run":
                        tbl.newCell(str(i), status, color="red")
                        if status != "notrun":
                            needNewRow = True
                        if options.calc_relatives:
                            tbl.newCell(tblCellID + "%", "-", color="red")
                        if options.calc_cr:
                            tbl.newCell(tblCellID + "$", "-", color="red")
                        if options.calc_score:
                            tbl.newCell(tblCellID + "S", "-", color="red")
                    else:
                        val = getter(case, cases[0], options.units)
                        def getRegression(fn):
                            # Compare against the chosen reference, or walk
                            # backwards to the nearest runnable log when the
                            # reference is 'previous' (-1).
                            if fn and val:
                                for j in reversed(range(i)) if reference < 0 else [reference]:
                                    r = cases[j]
                                    if r is not None and r.get("status") == 'run':
                                        return fn(case, r, options.units)
                        valp = getRegression(getter_p) if options.calc_relatives or options.progress_mode else None
                        valcr = getRegression(getter_cr) if options.calc_cr else None
                        val_score = getRegression(getter_score) if options.calc_score else None
                        # Color by x-factor: >5% faster green, >5% slower red.
                        if not valp:
                            color = None
                        elif valp > 1.05:
                            color = 'green'
                        elif valp < 0.95:
                            color = 'red'
                        else:
                            color = None
                        if addColor:
                            if not reverse:
                                tbl.newCell(str(i), formatValue(val, options.metric, show_units), val, color=color)
                            else:
                                r = cases[reference]
                                if r is not None and r.get("status") == 'run':
                                    val = getter(r, cases[0], options.units)
                                    tbl.newCell(str(reference), formatValue(val, options.metric, show_units), val, color=color)
                        if options.calc_relatives:
                            tbl.newCell(tblCellID + "%", formatValue(valp, "%"), valp, color=color, bold=color)
                        if options.calc_cr:
                            tbl.newCell(tblCellID + "$", formatValue(valcr, "$"), valcr, color=color, bold=color)
                        if options.calc_score:
                            tbl.newCell(tblCellID + "S", formatValue(val_score, "S"), val_score, color = color, bold = color)

    if not needNewRow:
        tbl.trimLastRow()

    if options.regressionsOnly:
        # Keep only rows whose last regression cells show a slowdown beyond
        # the given factor.
        for r in reversed(range(len(tbl.rows))):
            for i in range(1, len(options.regressions) + 1):
                val = tbl.rows[r].cells[len(tbl.rows[r].cells) - i].value
                if val is not None and val < float(options.regressionsOnly):
                    break
            else:
                tbl.rows.pop(r)

    # output table
    if options.generateHtml:
        if options.format == "moinwiki":
            tbl.htmlPrintTable(sys.stdout, True)
        else:
            htmlPrintHeader(sys.stdout, "Summary report for %s tests from %s test logs" % (len(test_cases), setsCount))
            tbl.htmlPrintTable(sys.stdout)
            htmlPrintFooter(sys.stdout)
    else:
        tbl.consolePrintTable(sys.stdout)

    if options.regressionsOnly:
        sys.exit(len(tbl.rows))
|
818
3rdparty/opencv-4.5.4/modules/ts/misc/table_formatter.py
vendored
Executable file
818
3rdparty/opencv-4.5.4/modules/ts/misc/table_formatter.py
vendored
Executable file
@ -0,0 +1,818 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from __future__ import print_function
|
||||
import sys, re, os.path, stat, math
|
||||
try:
|
||||
from html import escape
|
||||
except ImportError:
|
||||
from cgi import escape # Python 2.7
|
||||
from optparse import OptionParser
|
||||
from color import getColorizer, dummyColorizer
|
||||
|
||||
class tblCell(object):
    """One table cell.

    text:  display text (normalised to a list of lines by layout).
    value: optional raw value backing the text (used for sorting/filtering).
    props: optional per-cell property dict (align, color, colspan, ...).
    """

    def __init__(self, text, value=None, props=None):
        self.props = props
        self.value = value
        self.text = text
|
||||
|
||||
class tblColumn(object):
    """A column header.

    text:  caption shown in the header row.
    title: optional tooltip/long title.
    props: optional property dict (align, cssclass, ...).
    """

    def __init__(self, caption, title=None, props=None):
        self.props = props
        self.title = title
        self.text = caption
|
||||
|
||||
class tblRow(object):
    """A table row: a fixed-size list of cells (None = empty/spanned slot)
    plus an optional property dict."""

    def __init__(self, colsNum, props=None):
        self.props = props
        self.cells = [None for _ in range(colsNum)]
|
||||
|
||||
def htmlEncode(str):
    """HTML-escape each line and join them with <br/>.

    NOTE(review): the parameter shadows the builtin ``str``; it is expected
    to be a sequence of lines (see reformatTextValue), not a single string.
    Name kept for interface compatibility.
    """
    return '<br/>'.join(escape(line) for line in str)
|
||||
|
||||
class table(object):
|
||||
def_align = "left"
|
||||
def_valign = "middle"
|
||||
def_color = None
|
||||
def_colspan = 1
|
||||
def_rowspan = 1
|
||||
def_bold = False
|
||||
def_italic = False
|
||||
def_text="-"
|
||||
|
||||
def __init__(self, caption = None, format=None):
|
||||
self.format = format
|
||||
self.is_markdown = self.format == 'markdown'
|
||||
self.is_tabs = self.format == 'tabs'
|
||||
self.columns = {}
|
||||
self.rows = []
|
||||
self.ridx = -1;
|
||||
self.caption = caption
|
||||
pass
|
||||
|
||||
def newRow(self, **properties):
|
||||
if len(self.rows) - 1 == self.ridx:
|
||||
self.rows.append(tblRow(len(self.columns), properties))
|
||||
else:
|
||||
self.rows[self.ridx + 1].props = properties
|
||||
self.ridx += 1
|
||||
return self.rows[self.ridx]
|
||||
|
||||
def trimLastRow(self):
|
||||
if self.rows:
|
||||
self.rows.pop()
|
||||
if self.ridx >= len(self.rows):
|
||||
self.ridx = len(self.rows) - 1
|
||||
|
||||
def newColumn(self, name, caption, title = None, **properties):
|
||||
if name in self.columns:
|
||||
index = self.columns[name].index
|
||||
else:
|
||||
index = len(self.columns)
|
||||
if isinstance(caption, tblColumn):
|
||||
caption.index = index
|
||||
self.columns[name] = caption
|
||||
return caption
|
||||
else:
|
||||
col = tblColumn(caption, title, properties)
|
||||
col.index = index
|
||||
self.columns[name] = col
|
||||
return col
|
||||
|
||||
def getColumn(self, name):
|
||||
if isinstance(name, str):
|
||||
return self.columns.get(name, None)
|
||||
else:
|
||||
vals = [v for v in self.columns.values() if v.index == name]
|
||||
if vals:
|
||||
return vals[0]
|
||||
return None
|
||||
|
||||
def newCell(self, col_name, text, value = None, **properties):
|
||||
if self.ridx < 0:
|
||||
self.newRow()
|
||||
col = self.getColumn(col_name)
|
||||
row = self.rows[self.ridx]
|
||||
if not col:
|
||||
return None
|
||||
if isinstance(text, tblCell):
|
||||
cl = text
|
||||
else:
|
||||
cl = tblCell(text, value, properties)
|
||||
row.cells[col.index] = cl
|
||||
return cl
|
||||
|
||||
    def layoutTable(self):
        """Compute layout metrics (column widths, row heights, span handling)
        for console printing and return the columns sorted by index.

        Sets on columns: minwidth, line; on rows: minheight; on cells:
        colspan/rowspan (clamped), wspace/awailable during distribution.
        """
        columns = self.columns.values()
        columns = sorted(columns, key=lambda c: c.index)

        colspanned = []   # (row, col) of cells wider than one column
        rowspanned = []   # (row, col) of cells taller than one row

        self.headerHeight = 1
        rowsToAppend = 0

        # Measure header cells; header height is the tallest caption.
        for col in columns:
            self.measureCell(col)
            if col.height > self.headerHeight:
                self.headerHeight = col.height
            col.minwidth = col.width
            col.line = None

        for r in range(len(self.rows)):
            row = self.rows[r]
            row.minheight = 1
            for i in range(len(row.cells)):
                cell = row.cells[i]
                if row.cells[i] is None:
                    continue
                cell.line = None
                self.measureCell(cell)
                colspan = int(self.getValue("colspan", cell))
                rowspan = int(self.getValue("rowspan", cell))
                if colspan > 1:
                    colspanned.append((r, i))
                    # clamp the span to the table edge
                    if i + colspan > len(columns):
                        colspan = len(columns) - i
                    cell.colspan = colspan
                    # clear spanned cells
                    for j in range(i + 1, min(len(row.cells), i + colspan)):
                        row.cells[j] = None
                elif columns[i].minwidth < cell.width:
                    columns[i].minwidth = cell.width
                if rowspan > 1:
                    rowspanned.append((r, i))
                    # NOTE(review): uses `colspan` here, not `rowspan` --
                    # presumably a long-standing quirk; confirm before changing.
                    rowsToAppend2 = r + colspan - len(self.rows)
                    if rowsToAppend2 > rowsToAppend:
                        rowsToAppend = rowsToAppend2
                    cell.rowspan = rowspan
                    # clear spanned cells
                    for j in range(r + 1, min(len(self.rows), r + rowspan)):
                        if len(self.rows[j].cells) > i:
                            self.rows[j].cells[i] = None
                elif row.minheight < cell.height:
                    row.minheight = cell.height

        # Append filler rows needed by trailing rowspans.
        self.ridx = len(self.rows) - 1
        for r in range(rowsToAppend):
            self.newRow()
            self.rows[len(self.rows) - 1].minheight = 1

        # Iteratively widen columns until every colspanned cell fits.
        while colspanned:
            colspanned_new = []
            for r, c in colspanned:
                cell = self.rows[r].cells[c]
                # NOTE(review): next line is a no-op (result discarded).
                sum([col.minwidth for col in columns[c:c + cell.colspan]])
                cell.awailable = sum([col.minwidth for col in columns[c:c + cell.colspan]]) + cell.colspan - 1
                if cell.awailable < cell.width:
                    colspanned_new.append((r, c))
            colspanned = colspanned_new
            if colspanned:
                r, c = colspanned[0]
                cell = self.rows[r].cells[c]
                cols = columns[c:c + cell.colspan]
                # Distribute the missing width proportionally to the
                # columns under the first still-too-narrow cell.
                total = cell.awailable - cell.colspan + 1
                budget = cell.width - cell.awailable
                spent = 0
                s = 0
                for col in cols:
                    s += col.minwidth
                    addition = s * budget / total - spent
                    spent += addition
                    col.minwidth += addition

        # Same distribution, vertically, for rowspanned cells.
        while rowspanned:
            rowspanned_new = []
            for r, c in rowspanned:
                cell = self.rows[r].cells[c]
                cell.awailable = sum([row.minheight for row in self.rows[r:r + cell.rowspan]])
                if cell.awailable < cell.height:
                    rowspanned_new.append((r, c))
            rowspanned = rowspanned_new
            if rowspanned:
                r, c = rowspanned[0]
                cell = self.rows[r].cells[c]
                rows = self.rows[r:r + cell.rowspan]
                total = cell.awailable
                budget = cell.height - cell.awailable
                spent = 0
                s = 0
                for row in rows:
                    s += row.minheight
                    addition = s * budget / total - spent
                    spent += addition
                    row.minheight += addition

        return columns
|
||||
|
||||
def measureCell(self, cell):
|
||||
text = self.getValue("text", cell)
|
||||
cell.text = self.reformatTextValue(text)
|
||||
cell.height = len(cell.text)
|
||||
cell.width = len(max(cell.text, key = lambda line: len(line)))
|
||||
|
||||
def reformatTextValue(self, value):
|
||||
if sys.version_info >= (2,7):
|
||||
unicode = str
|
||||
if isinstance(value, str):
|
||||
vstr = value
|
||||
elif isinstance(value, unicode):
|
||||
vstr = str(value)
|
||||
else:
|
||||
try:
|
||||
vstr = '\n'.join([str(v) for v in value])
|
||||
except TypeError:
|
||||
vstr = str(value)
|
||||
return vstr.splitlines()
|
||||
|
||||
    def adjustColWidth(self, cols, width):
        """Grow *cols* proportionally so their combined width (plus single-space
        separators) reaches *width*; no-op when they already fit.

        NOTE(review): this uses ``minWidth`` (capital W) while the rest of
        the class uses ``minwidth`` -- presumably dead/legacy code; confirm
        before relying on it. The `/` division also yields floats on
        Python 3 -- verify callers expect that.
        """
        total = sum([c.minWidth for c in cols])
        if total + len(cols) - 1 >= width:
            return
        budget = width - len(cols) + 1 - total
        spent = 0
        s = 0
        for col in cols:
            s += col.minWidth
            addition = s * budget / total - spent
            spent += addition
            col.minWidth += addition
|
||||
|
||||
    def getValue(self, name, *elements):
        """Resolve property *name* by searching, in order: a direct attribute
        on each element, then each element's ``props`` dict, then the
        class-level ``def_<name>`` default; None when nothing matches.

        Elements may be None (empty cells): the AttributeError handlers
        simply skip them.
        """
        for el in elements:
            try:
                return getattr(el, name)
            except AttributeError:
                pass
            try:
                val = el.props[name]
                # Falsy prop values fall through to later elements/defaults.
                if val:
                    return val
            except AttributeError:
                pass
            except KeyError:
                pass
        try:
            return getattr(self.__class__, "def_" + name)
        except AttributeError:
            return None
|
||||
|
||||
    def consolePrintTable(self, out):
        """Render the whole table to stream *out* as text.

        Uses a colorizing writer for plain console output; markdown/tabs
        formats get a pass-through writer (no escape codes).
        """
        columns = self.layoutTable()
        colrizer = getColorizer(out) if not (self.is_markdown or self.is_tabs) else dummyColorizer(out)

        if self.caption:
            out.write("%s%s%s" % (os.linesep, os.linesep.join(self.reformatTextValue(self.caption)), os.linesep * 2))

        # Synthesize a header row out of the column objects themselves.
        headerRow = tblRow(len(columns), {"align": "center", "valign": "top", "bold": True, "header": True})
        headerRow.cells = columns
        headerRow.minheight = self.headerHeight

        self.consolePrintRow2(colrizer, headerRow, columns)

        for i in range(0, len(self.rows)):
            self.consolePrintRow2(colrizer, i, columns)
|
||||
|
||||
    def consolePrintRow2(self, out, r, columns):
        """Print one row to writer *out*.

        *r* is either a row index into self.rows or a tblRow instance
        (used for the synthesized header row, flagged by r = -1).
        """
        if isinstance(r, tblRow):
            row = r
            r = -1
        else:
            row = self.rows[r]

        # evaluate initial values for line numbers
        i = 0
        while i < len(row.cells):
            cell = row.cells[i]
            colspan = self.getValue("colspan", cell)
            if cell is not None:
                # Horizontal space available to the cell: spanned column
                # widths plus the separators between them.
                cell.wspace = sum([col.minwidth for col in columns[i:i + colspan]]) + colspan - 1
                if cell.line is None:
                    if r < 0:
                        rows = [row]
                    else:
                        rows = self.rows[r:r + self.getValue("rowspan", cell)]
                    cell.line = self.evalLine(cell, rows, columns[i])
                    # Share the cell object across all rows it spans so the
                    # line cursor advances consistently.
                    if len(rows) > 1:
                        for rw in rows:
                            rw.cells[i] = cell
            i += colspan

        # print content
        if self.is_markdown:
            out.write("|")
            for c in row.cells:
                text = ' '.join(self.getValue('text', c) or [])
                out.write(text + "|")
            out.write(os.linesep)
        elif self.is_tabs:
            cols_to_join = [' '.join(self.getValue('text', c) or []) for c in row.cells]
            out.write('\t'.join(cols_to_join))
            out.write(os.linesep)
        else:
            # Console mode: emit minheight physical lines for this row.
            for ln in range(row.minheight):
                i = 0
                while i < len(row.cells):
                    if i > 0:
                        out.write(" ")
                    cell = row.cells[i]
                    column = columns[i]
                    if cell is None:
                        out.write(" " * column.minwidth)
                        i += 1
                    else:
                        self.consolePrintLine(cell, row, column, out)
                        i += self.getValue("colspan", cell)
                if self.is_markdown:
                    out.write("|")
                out.write(os.linesep)

        # Markdown header rows need the |---|:-:|--:| separator line.
        if self.is_markdown and row.props.get('header', False):
            out.write("|")
            for th in row.cells:
                align = self.getValue("align", th)
                if align == 'center':
                    out.write(":-:|")
                elif align == 'right':
                    out.write("--:|")
                else:
                    out.write("---|")
            out.write(os.linesep)
|
||||
|
||||
    def consolePrintLine(self, cell, row, column, out):
        """Emit one physical line of *cell*, padded/aligned to the cell's
        computed width, and advance the cell's line cursor.

        Lines outside [0, height) print as blank padding (this is how
        valign offsets from evalLine materialise).
        """
        if cell.line < 0 or cell.line >= cell.height:
            line = ""
        else:
            line = cell.text[cell.line]
        width = cell.wspace
        # Header cells are tblColumn, not tblCell: skip attribute lookup on them.
        align = self.getValue("align", ((None, cell)[isinstance(cell, tblCell)]), row, column)

        if align == "right":
            pattern = "%" + str(width) + "s"
        elif align == "center":
            # Left-pad to center, then pad the remainder on the right.
            pattern = "%" + str((width - len(line)) // 2 + len(line)) + "s" + " " * (width - len(line) - (width - len(line)) // 2)
        else:
            pattern = "%-" + str(width) + "s"

        # `out` is a colorizer wrapper; its write() takes a color keyword.
        out.write(pattern % line, color=self.getValue("color", cell, row, column))
        cell.line += 1
|
||||
|
||||
def evalLine(self, cell, rows, column):
|
||||
height = cell.height
|
||||
valign = self.getValue("valign", cell, rows[0], column)
|
||||
space = sum([row.minheight for row in rows])
|
||||
if valign == "bottom":
|
||||
return height - space
|
||||
if valign == "middle":
|
||||
return (height - space + 1) // 2
|
||||
return 0
|
||||
|
||||
def htmlPrintTable(self, out, embeedcss = False):
    """Render the table as an HTML <table> to *out*.

    When *embeedcss* is True the styling is inlined into style attributes
    (for self-contained pages); otherwise CSS classes from htmlPrintHeader
    are used.  Header rows go into <thead>, data rows into <tbody>.
    """
    columns = self.layoutTable()

    if embeedcss:
        out.write("<div style=\"font-family: Lucida Console, Courier New, Courier;font-size: 16px;color:#3e4758;\">\n<table style=\"background:none repeat scroll 0 0 #FFFFFF;border-collapse:collapse;font-family:'Lucida Sans Unicode','Lucida Grande',Sans-Serif;font-size:14px;margin:20px;text-align:left;width:480px;margin-left: auto;margin-right: auto;white-space:nowrap;\">\n")
    else:
        out.write("<div class=\"tableFormatter\">\n<table class=\"tbl\">\n")
    if self.caption:
        if embeedcss:
            out.write(" <caption style=\"font:italic 16px 'Trebuchet MS',Verdana,Arial,Helvetica,sans-serif;padding:0 0 5px;text-align:right;white-space:normal;\">%s</caption>\n" % htmlEncode(self.reformatTextValue(self.caption)))
        else:
            out.write(" <caption>%s</caption>\n" % htmlEncode(self.reformatTextValue(self.caption)))
    out.write(" <thead>\n")

    # synthetic first header row built from the column definitions
    headerRow = tblRow(len(columns), {"align": "center", "valign": "top", "bold": True, "header": True})
    headerRow.cells = columns

    header_rows = [headerRow]
    # NOTE(review): getValue("header") is called without the row element here
    # (and again for the tbody filter below), so it presumably only reads
    # table-level props -- looks like getValue("header", row) was intended;
    # confirm against the getValue signature.
    header_rows.extend([row for row in self.rows if self.getValue("header")])
    last_row = header_rows[len(header_rows) - 1]

    for row in header_rows:
        out.write(" <tr>\n")
        for th in row.cells:
            align = self.getValue("align", ((None, th)[isinstance(th, tblCell)]), row, row)
            valign = self.getValue("valign", th, row)
            cssclass = self.getValue("cssclass", th)
            attr = ""
            if align:
                attr += " align=\"%s\"" % align
            if valign:
                attr += " valign=\"%s\"" % valign
            if cssclass:
                attr += " class=\"%s\"" % cssclass
            css = ""
            if embeedcss:
                css = " style=\"border:none;color:#003399;font-size:16px;font-weight:normal;white-space:nowrap;padding:3px 10px;\""
                # last header row gets extra bottom padding (mirrors the CSS rule)
                if row == last_row:
                    css = css[:-1] + "padding-bottom:5px;\""
            out.write(" <th%s%s>\n" % (attr, css))
            if th is not None:
                out.write(" %s\n" % htmlEncode(th.text))
            out.write(" </th>\n")
        out.write(" </tr>\n")

    out.write(" </thead>\n <tbody>\n")

    rows = [row for row in self.rows if not self.getValue("header")]
    for r in range(len(rows)):
        row = rows[r]
        rowattr = ""
        cssclass = self.getValue("cssclass", row)
        if cssclass:
            rowattr += " class=\"%s\"" % cssclass
        out.write(" <tr%s>\n" % (rowattr))
        i = 0
        while i < len(row.cells):
            column = columns[i]
            td = row.cells[i]
            # an int marks a cell shadowed by a rowspan above: skip that many columns
            if isinstance(td, int):
                i += td
                continue
            colspan = self.getValue("colspan", td)
            rowspan = self.getValue("rowspan", td)
            align = self.getValue("align", td, row, column)
            valign = self.getValue("valign", td, row, column)
            color = self.getValue("color", td, row, column)
            bold = self.getValue("bold", td, row, column)
            italic = self.getValue("italic", td, row, column)
            style = ""
            attr = ""
            if color:
                style += "color:%s;" % color
            if bold:
                style += "font-weight: bold;"
            if italic:
                style += "font-style: italic;"
            if align and align != "left":
                attr += " align=\"%s\"" % align
            if valign and valign != "middle":
                attr += " valign=\"%s\"" % valign
            if colspan > 1:
                attr += " colspan=\"%s\"" % colspan
            if rowspan > 1:
                attr += " rowspan=\"%s\"" % rowspan
                # mark the covered cells in following rows so they are skipped
                for q in range(r+1, min(r+rowspan, len(rows))):
                    rows[q].cells[i] = colspan
            if style:
                attr += " style=\"%s\"" % style
            css = ""
            if embeedcss:
                css = " style=\"border:none;border-bottom:1px solid #CCCCCC;color:#666699;padding:6px 8px;white-space:nowrap;\""
                # first body row gets the thick top border (mirrors the CSS rule)
                if r == 0:
                    css = css[:-1] + "border-top:2px solid #6678B1;\""
            out.write(" <td%s%s>\n" % (attr, css))
            if td is not None:
                out.write(" %s\n" % htmlEncode(td.text))
            out.write(" </td>\n")
            i += colspan
        out.write(" </tr>\n")

    out.write(" </tbody>\n</table>\n</div>\n")
|
||||
|
||||
def htmlPrintHeader(out, title = None):
    """Write the HTML page prologue (doctype, CSS, jQuery filter script) to *out*.

    The embedded JavaScript adds per-column filter inputs to tables that
    carry col_name/col_rel/col_cr classes and applies filters from the
    URL query string.  Close the page with htmlPrintFooter().
    """
    if title:
        # NOTE(review): title is wrapped in a list before htmlEncode --
        # presumably htmlEncode accepts a list of lines; confirm.
        titletag = "<title>%s</title>\n" % htmlEncode([str(title)])
    else:
        titletag = ""
    # One big %-format template; literal percent signs inside are escaped as %%.
    out.write("""<!DOCTYPE HTML>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=us-ascii">
%s<style type="text/css">
html, body {font-family: Lucida Console, Courier New, Courier;font-size: 16px;color:#3e4758;}
.tbl{background:none repeat scroll 0 0 #FFFFFF;border-collapse:collapse;font-family:"Lucida Sans Unicode","Lucida Grande",Sans-Serif;font-size:14px;margin:20px;text-align:left;width:480px;margin-left: auto;margin-right: auto;white-space:nowrap;}
.tbl span{display:block;white-space:nowrap;}
.tbl thead tr:last-child th {padding-bottom:5px;}
.tbl tbody tr:first-child td {border-top:3px solid #6678B1;}
.tbl th{border:none;color:#003399;font-size:16px;font-weight:normal;white-space:nowrap;padding:3px 10px;}
.tbl td{border:none;border-bottom:1px solid #CCCCCC;color:#666699;padding:6px 8px;white-space:nowrap;}
.tbl tbody tr:hover td{color:#000099;}
.tbl caption{font:italic 16px "Trebuchet MS",Verdana,Arial,Helvetica,sans-serif;padding:0 0 5px;text-align:right;white-space:normal;}
.firstingroup {border-top:2px solid #6678B1;}
</style>
<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.6.4/jquery.min.js"></script>
<script type="text/javascript">
function abs(val) { return val < 0 ? -val : val }
$(function(){
//generate filter rows
$("div.tableFormatter table.tbl").each(function(tblIdx, tbl) {
var head = $("thead", tbl)
var filters = $("<tr></tr>")
var hasAny = false
$("tr:first th", head).each(function(colIdx, col) {
col = $(col)
var cell
var id = "t" + tblIdx + "r" + colIdx
if (col.hasClass("col_name")){
cell = $("<th><input id='" + id + "' name='" + id + "' type='text' style='width:100%%' class='filter_col_name' title='Regular expression for name filtering (&quot;resize.*640x480&quot - resize tests on VGA resolution)'></input></th>")
hasAny = true
}
else if (col.hasClass("col_rel")){
cell = $("<th><input id='" + id + "' name='" + id + "' type='text' style='width:100%%' class='filter_col_rel' title='Filter out lines with a x-factor of acceleration less than Nx'></input></th>")
hasAny = true
}
else if (col.hasClass("col_cr")){
cell = $("<th><input id='" + id + "' name='" + id + "' type='text' style='width:100%%' class='filter_col_cr' title='Filter out lines with a percentage of acceleration less than N%%'></input></th>")
hasAny = true
}
else
cell = $("<th></th>")
cell.appendTo(filters)
})
if (hasAny){
$(tbl).wrap("<form id='form_t" + tblIdx + "' method='get' action=''></form>")
$("<input it='test' type='submit' value='Apply Filters' style='margin-left:10px;'></input>")
.appendTo($("th:last", filters.appendTo(head)))
}
})

//get filter values
var vars = []
var hashes = window.location.href.slice(window.location.href.indexOf('?') + 1).split('&')
for(var i = 0; i < hashes.length; ++i)
{
hash = hashes[i].split('=')
vars.push(decodeURIComponent(hash[0]))
vars[decodeURIComponent(hash[0])] = decodeURIComponent(hash[1]);
}

//set filter values
for(var i = 0; i < vars.length; ++i)
$("#" + vars[i]).val(vars[vars[i]])

//apply filters
$("div.tableFormatter table.tbl").each(function(tblIdx, tbl) {
filters = $("input:text", tbl)
var predicate = function(row) {return true;}
var empty = true
$.each($("input:text", tbl), function(i, flt) {
flt = $(flt)
var val = flt.val()
var pred = predicate;
if(val) {
empty = false
var colIdx = parseInt(flt.attr("id").slice(flt.attr("id").indexOf('r') + 1))
if(flt.hasClass("filter_col_name")) {
var re = new RegExp(val);
predicate = function(row) {
if (re.exec($(row.get(colIdx)).text()) == null)
return false
return pred(row)
}
} else if(flt.hasClass("filter_col_rel")) {
var percent = parseFloat(val)
if (percent < 0) {
predicate = function(row) {
var val = parseFloat($(row.get(colIdx)).text())
if (!val || val >= 1 || val > 1+percent)
return false
return pred(row)
}
} else {
predicate = function(row) {
var val = parseFloat($(row.get(colIdx)).text())
if (!val || val < percent)
return false
return pred(row)
}
}
} else if(flt.hasClass("filter_col_cr")) {
var percent = parseFloat(val)
predicate = function(row) {
var val = parseFloat($(row.get(colIdx)).text())
if (!val || val < percent)
return false
return pred(row)
}
}
}
});
if (!empty){
$("tbody tr", tbl).each(function (i, tbl_row) {
if(!predicate($("td", tbl_row)))
$(tbl_row).remove()
})
if($("tbody tr", tbl).length == 0) {
$("<tr><td colspan='"+$("thead tr:first th", tbl).length+"'>No results matching your search criteria</td></tr>")
.appendTo($("tbody", tbl))
}
}
})
})
</script>
</head>
<body>
""" % titletag)
|
||||
|
||||
def htmlPrintFooter(out):
    """Emit the closing tags for a page opened by htmlPrintHeader()."""
    for tag in ("</body>\n", "</html>"):
        out.write(tag)
|
||||
|
||||
def getStdoutFilename():
    """Best-effort: return the file name stdout is attached to, or "".

    Used by detectHtmlOutputType() to auto-select HTML output when stdout
    is redirected into a *.htm/*.html file.  Any probe failure (no /proc,
    detached stdout, permission issues) simply yields "".
    """
    try:
        if os.name == "nt":
            import msvcrt, ctypes
            handle = msvcrt.get_osfhandle(sys.stdout.fileno())
            size = ctypes.c_ulong(1024)
            nameBuffer = ctypes.create_string_buffer(size.value)
            # flag 4 == VOLUME_NAME_NONE: path without the drive prefix
            ctypes.windll.kernel32.GetFinalPathNameByHandleA(handle, nameBuffer, size, 4)
            return nameBuffer.value
        else:
            # Linux-specific: resolve where file descriptor 1 points
            return os.readlink('/proc/self/fd/1')
    except Exception:
        # Narrowed from a bare "except:" -- keep the best-effort contract
        # but do not swallow KeyboardInterrupt/SystemExit.
        return ""
|
||||
|
||||
def detectHtmlOutputType(requestedType):
    """Return True when output should be HTML, False for plain text.

    Explicit format names win; in "auto" mode a terminal gets text, while
    stdout redirected into a *.htm/*.html file gets HTML.
    """
    if requestedType in ('txt', 'markdown'):
        return False
    if requestedType in ('html', 'moinwiki'):
        return True
    # auto-detection path
    if sys.stdout.isatty():
        return False
    outname = getStdoutFilename()
    if not outname:
        return False
    return outname.endswith((".htm", ".html"))
|
||||
|
||||
def getRelativeVal(test, test0, metric):
    """Speed-up factor: baseline *metric* value divided by the current one.

    Returns None when either entry is missing/empty, a value is absent,
    or the current value is zero (avoids division by zero).
    """
    if not (test and test0):
        return None
    baseline = test0.get(metric, "s")
    if not baseline:
        return None
    current = test.get(metric, "s")
    if not current or current == 0:
        return None
    return float(baseline) / current
|
||||
|
||||
def getCycleReduction(test, test0, metric):
    """Percent of time saved relative to the baseline *metric* value.

    Returns None when either entry or value is unavailable or the
    baseline is zero.
    """
    if not (test and test0):
        return None
    baseline = test0.get(metric, "s")
    if not baseline or baseline == 0:
        return None
    current = test.get(metric, "s")
    if not current:
        return None
    return (1.0 - float(current) / baseline) * 100
|
||||
|
||||
def getScore(test, test0, metric):
    """Significance score: log-gmean difference normalized by combined log-stddev.

    Returns None when either entry is missing, a gmean is zero, or the
    combined deviation is zero.  NOTE: *metric* is accepted for
    getter-table compatibility; the computation always uses
    "gmean"/"gstddev".
    """
    if not (test and test0):
        return None
    gm = float(test.get("gmean", None))
    gm0 = float(test0.get("gmean", None))
    if gm == 0 or gm0 == 0:
        return None
    dev = float(test.get("gstddev", None))
    dev0 = float(test0.get("gstddev", None))
    combined = math.sqrt(dev * dev + dev0 * dev0)
    if combined == 0:
        return None
    return (math.log(gm) - math.log(gm0)) / combined
|
||||
|
||||
# Registry of displayable metrics: name -> (column title, getter).
# Each getter takes (test, test0, units); test0 is the baseline entry used
# by the relative ("%"), cycle-reduction ("$") and score variants.
metrix_table = \
{
    "name": ("Name of Test", lambda test,test0,units: str(test)),

    # raw sample counters
    "samples": ("Number of\ncollected samples", lambda test,test0,units: test.get("samples", units)),
    "outliers": ("Number of\noutliers", lambda test,test0,units: test.get("outliers", units)),

    # absolute timing statistics (scaled to *units* by TestInfo.get)
    "gmean": ("Geometric mean", lambda test,test0,units: test.get("gmean", units)),
    "mean": ("Mean", lambda test,test0,units: test.get("mean", units)),
    "min": ("Min", lambda test,test0,units: test.get("min", units)),
    "median": ("Median", lambda test,test0,units: test.get("median", units)),
    "stddev": ("Standard deviation", lambda test,test0,units: test.get("stddev", units)),
    "gstddev": ("Standard deviation of Ln(time)", lambda test,test0,units: test.get("gstddev")),

    # ratios vs the baseline run (speed-up factors)
    "gmean%": ("Geometric mean (relative)", lambda test,test0,units: getRelativeVal(test, test0, "gmean")),
    "mean%": ("Mean (relative)", lambda test,test0,units: getRelativeVal(test, test0, "mean")),
    "min%": ("Min (relative)", lambda test,test0,units: getRelativeVal(test, test0, "min")),
    "median%": ("Median (relative)", lambda test,test0,units: getRelativeVal(test, test0, "median")),
    "stddev%": ("Standard deviation (relative)", lambda test,test0,units: getRelativeVal(test, test0, "stddev")),
    "gstddev%": ("Standard deviation of Ln(time) (relative)", lambda test,test0,units: getRelativeVal(test, test0, "gstddev")),

    # percentage of time saved vs the baseline run
    "gmean$": ("Geometric mean (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "gmean")),
    "mean$": ("Mean (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "mean")),
    "min$": ("Min (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "min")),
    "median$": ("Median (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "median")),
    "stddev$": ("Standard deviation (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "stddev")),
    "gstddev$": ("Standard deviation of Ln(time) (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "gstddev")),

    # statistical significance score
    "score": ("SCORE", lambda test,test0,units: getScore(test, test0, "gstddev")),
}
|
||||
|
||||
def formatValue(val, metric, units = None):
    """Render a metric value as display text; the metric suffix picks the format."""
    if val is None:
        return "-"
    if metric.endswith("%"):        # relative ratio
        return "%.2f" % val
    if metric.endswith("$"):        # cycle reduction, shown as a percentage
        return "%.2f%%" % val
    if metric.endswith("S"):        # significance score -> verdict words
        if val > 3.5:
            return "SLOWER"
        if val < -3.5:
            return "FASTER"
        if -1.5 < val < 1.5:
            return " "
        #return "%.4f" % val
        return "faster" if val < 0 else "slower"
    # plain numeric value, optionally suffixed with its units
    return ("%.3f %s" % (val, units)) if units else ("%.3f" % val)
|
||||
|
||||
if __name__ == "__main__":
    # CLI: render one table per given Google Test XML log, showing the
    # chosen metric for every test case.
    if len(sys.argv) < 2:
        print("Usage:\n", os.path.basename(sys.argv[0]), "<log_name>.xml")
        exit(0)

    parser = OptionParser()
    parser.add_option("-o", "--output", dest="format", help="output results in text format (can be 'txt', 'html', 'markdown' or 'auto' - default)", metavar="FMT", default="auto")
    parser.add_option("-m", "--metric", dest="metric", help="output metric", metavar="NAME", default="gmean")
    parser.add_option("-u", "--units", dest="units", help="units for output values (s, ms (default), us, ns or ticks)", metavar="UNITS", default="ms")
    (options, args) = parser.parse_args()

    options.generateHtml = detectHtmlOutputType(options.format)
    # unknown metric names silently fall back to the geometric mean
    if options.metric not in metrix_table:
        options.metric = "gmean"

    #print options
    #print args

    # tbl = table()
    # tbl.newColumn("first", "qqqq", align = "left")
    # tbl.newColumn("second", "wwww\nz\nx\n")
    # tbl.newColumn("third", "wwasdas")
    #
    # tbl.newCell(0, "ccc111", align = "right")
    # tbl.newCell(1, "dddd1")
    # tbl.newCell(2, "8768756754")
    # tbl.newRow()
    # tbl.newCell(0, "1\n2\n3\n4\n5\n6\n7", align = "center", colspan = 2, rowspan = 2)
    # tbl.newCell(2, "xxx\nqqq", align = "center", colspan = 1, valign = "middle")
    # tbl.newRow()
    # tbl.newCell(2, "+", align = "center", colspan = 1, valign = "middle")
    # tbl.newRow()
    # tbl.newCell(0, "vcvvbasdsadassdasdasv", align = "right", colspan = 2)
    # tbl.newCell(2, "dddd1")
    # tbl.newRow()
    # tbl.newCell(0, "vcvvbv")
    # tbl.newCell(1, "3445324", align = "right")
    # tbl.newCell(2, None)
    # tbl.newCell(1, "0000")
    # if sys.stdout.isatty():
    #     tbl.consolePrintTable(sys.stdout)
    # else:
    #     htmlPrintHeader(sys.stdout)
    #     tbl.htmlPrintTable(sys.stdout)
    #     htmlPrintFooter(sys.stdout)

    import testlog_parser

    if options.generateHtml:
        htmlPrintHeader(sys.stdout, "Tables demo")

    # metric getter: (test, baseline, units) -> value
    getter = metrix_table[options.metric][1]

    for arg in args:
        tests = testlog_parser.parseLogFile(arg)
        tbl = table(arg, format=options.format)
        tbl.newColumn("name", "Name of Test", align = "left")
        tbl.newColumn("value", metrix_table[options.metric][0], align = "center", bold = "true")

        for t in sorted(tests):
            tbl.newRow()
            tbl.newCell("name", str(t))

            status = t.get("status")
            if status != "run":
                # not-run tests show their status instead of a value
                tbl.newCell("value", status)
            else:
                val = getter(t, None, options.units)
                if val:
                    if options.metric.endswith("%"):
                        tbl.newCell("value", "%.2f" % val, val)
                    else:
                        tbl.newCell("value", "%.3f %s" % (val, options.units), val)
                else:
                    tbl.newCell("value", "-")

        if options.generateHtml:
            tbl.htmlPrintTable(sys.stdout)
        else:
            tbl.consolePrintTable(sys.stdout)

    if options.generateHtml:
        htmlPrintFooter(sys.stdout)
|
232
3rdparty/opencv-4.5.4/modules/ts/misc/testlog_parser.py
vendored
Executable file
232
3rdparty/opencv-4.5.4/modules/ts/misc/testlog_parser.py
vendored
Executable file
@ -0,0 +1,232 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
from __future__ import print_function
|
||||
import collections
|
||||
import re
|
||||
import os.path
|
||||
import sys
|
||||
from xml.dom.minidom import parse
|
||||
|
||||
# Python 3 shims for the Python 2 builtins this module still uses.
if sys.version_info > (3,):
    long = int
    def cmp(a, b): return (a>b)-(a<b)
|
||||
|
||||
class TestInfo(object):
    """One <testcase> entry from a Google Test XML log, with parsed perf metrics.

    Attributes set in __init__: fixture, name, value_param, type_param,
    status, properties (name->value of <property> children) and metrix
    (parsed numeric metrics).
    """

    def __init__(self, xmlnode):
        self.fixture = xmlnode.getAttribute("classname")
        self.name = xmlnode.getAttribute("name")
        self.value_param = xmlnode.getAttribute("value_param")
        self.type_param = xmlnode.getAttribute("type_param")

        custom_status = xmlnode.getAttribute("custom_status")
        failures = xmlnode.getElementsByTagName("failure")

        # precedence: explicit custom_status > recorded failures > gtest status
        if len(custom_status) > 0:
            self.status = custom_status
        elif len(failures) > 0:
            self.status = "failed"
        else:
            self.status = xmlnode.getAttribute("status")

        # DISABLED_ tests are reported under their plain name
        if self.name.startswith("DISABLED_"):
            if self.status == 'notrun':
                self.status = "disabled"
            self.fixture = self.fixture.replace("DISABLED_", "")
            self.name = self.name.replace("DISABLED_", "")
        self.properties = {
            prop.getAttribute("name") : prop.getAttribute("value")
            for prop in xmlnode.getElementsByTagName("property")
            if prop.hasAttribute("name") and prop.hasAttribute("value")
        }
        self.metrix = {}
        self.parseLongMetric(xmlnode, "bytesIn");
        self.parseLongMetric(xmlnode, "bytesOut");
        self.parseIntMetric(xmlnode, "samples");
        self.parseIntMetric(xmlnode, "outliers");
        self.parseFloatMetric(xmlnode, "frequency", 1);
        self.parseLongMetric(xmlnode, "min");
        self.parseLongMetric(xmlnode, "median");
        self.parseLongMetric(xmlnode, "gmean");
        self.parseLongMetric(xmlnode, "mean");
        self.parseLongMetric(xmlnode, "stddev");
        self.parseFloatMetric(xmlnode, "gstddev");
        self.parseFloatMetric(xmlnode, "time");
        self.parseLongMetric(xmlnode, "total_memory_usage");

    def parseLongMetric(self, xmlnode, name, default = 0):
        """Store metric *name* as a long; <property> values win over attributes."""
        if name in self.properties:
            self.metrix[name] = long(self.properties[name])
        elif xmlnode.hasAttribute(name):
            self.metrix[name] = long(xmlnode.getAttribute(name))
        else:
            self.metrix[name] = default

    def parseIntMetric(self, xmlnode, name, default = 0):
        """Store metric *name* as an int; <property> values win over attributes."""
        if name in self.properties:
            self.metrix[name] = int(self.properties[name])
        elif xmlnode.hasAttribute(name):
            self.metrix[name] = int(xmlnode.getAttribute(name))
        else:
            self.metrix[name] = default

    def parseFloatMetric(self, xmlnode, name, default = 0):
        """Store metric *name* as a float; <property> values win over attributes."""
        if name in self.properties:
            self.metrix[name] = float(self.properties[name])
        elif xmlnode.hasAttribute(name):
            self.metrix[name] = float(xmlnode.getAttribute(name))
        else:
            self.metrix[name] = default

    def parseStringMetric(self, xmlnode, name, default = None):
        """Store metric *name* as a stripped string; <property> values win."""
        if name in self.properties:
            self.metrix[name] = self.properties[name].strip()
        elif xmlnode.hasAttribute(name):
            self.metrix[name] = xmlnode.getAttribute(name).strip()
        else:
            self.metrix[name] = default

    def get(self, name, units="ms"):
        """Return an identification field or a metric, time metrics scaled to *units*."""
        if name == "classname":
            return self.fixture
        if name == "name":
            return self.name
        if name == "fullname":
            return self.__str__()
        if name == "value_param":
            return self.value_param
        if name == "type_param":
            return self.type_param
        if name == "status":
            return self.status
        val = self.metrix.get(name, None)
        if not val:
            return val
        if name == "time":
            return self.metrix.get("time")
        if name in ["gmean", "min", "mean", "median", "stddev"]:
            # raw values are in CPU ticks; convert via frequency and unit scale
            scale = 1.0
            frequency = self.metrix.get("frequency", 1.0) or 1.0
            if units == "ms":
                scale = 1000.0
            if units == "us" or units == "mks": # mks is typo error for microsecond (<= OpenCV 3.4)
                scale = 1000000.0
            if units == "ns":
                scale = 1000000000.0
            if units == "ticks":
                frequency = long(1)
                scale = long(1)
            return val * scale / frequency
        return val


    def dump(self, units="ms"):
        """Print a one-line colored summary (status + gmean) to stdout."""
        print("%s ->\t\033[1;31m%s\033[0m = \t%.2f%s" % (str(self), self.status, self.get("gmean", units), units))


    def getName(self):
        """Return the test name without its parameter suffix ("Foo/0" -> "Foo")."""
        pos = self.name.find("/")
        if pos > 0:
            return self.name[:pos]
        return self.name


    def getFixture(self):
        """Return the fixture name with the test-name suffix and trailing '_' removed."""
        if self.fixture.endswith(self.getName()):
            fixture = self.fixture[:-len(self.getName())]
        else:
            fixture = self.fixture
        if fixture.endswith("_"):
            fixture = fixture[:-1]
        return fixture


    def param(self):
        """Return "type_param::value_param" (empty components omitted)."""
        return '::'.join(filter(None, [self.type_param, self.value_param]))

    def shortName(self):
        """Return "name::fixture" (empty components omitted)."""
        name = self.getName()
        fixture = self.getFixture()
        return '::'.join(filter(None, [name, fixture]))


    def __str__(self):
        # full identifier: name::fixture::type_param::value_param
        name = self.getName()
        fixture = self.getFixture()
        return '::'.join(filter(None, [name, fixture, self.type_param, self.value_param]))


    def __cmp__(self, other):
        # Order by fixture, then type_param, then value_param; a missing
        # param sorts after a present one.
        # NOTE(review): __cmp__ is ignored on Python 3 -- sorted(tests)
        # presumably relies on it; confirm Python 3 sorting support.
        r = cmp(self.fixture, other.fixture);
        if r != 0:
            return r
        if self.type_param:
            if other.type_param:
                r = cmp(self.type_param, other.type_param);
                if r != 0:
                    return r
            else:
                return -1
        else:
            if other.type_param:
                return 1
        if self.value_param:
            if other.value_param:
                r = cmp(self.value_param, other.value_param);
                if r != 0:
                    return r
            else:
                return -1
        else:
            if other.value_param:
                return 1
        return 0
|
||||
|
||||
# This is a Sequence for compatibility with old scripts,
# which treat parseLogFile's return value as a list.
try:
    # The plain collections.Sequence alias was removed in Python 3.10;
    # the ABC lives in collections.abc since Python 3.3.
    _Sequence = collections.abc.Sequence
except AttributeError:  # Python 2
    _Sequence = collections.Sequence


class TestRunInfo(_Sequence):
    """Read-only sequence of TestInfo objects plus run-level properties."""

    def __init__(self, properties, tests):
        # properties: dict of cv_* attributes from the log's root element
        self.properties = properties
        # tests: list of TestInfo entries
        self.tests = tests

    def __len__(self):
        return len(self.tests)

    def __getitem__(self, key):
        return self.tests[key]
|
||||
|
||||
def parseLogFile(filename):
    """Parse a Google Test XML log into a TestRunInfo (sequence of TestInfo)."""
    doc = parse(filename)
    root = doc.documentElement

    # run-level properties are stored as cv_* attributes on the root element
    properties = {}
    for (attr_name, attr_value) in root.attributes.items():
        if attr_name.startswith('cv_'):
            properties[attr_name[3:]] = attr_value

    tests = [TestInfo(node) for node in doc.getElementsByTagName("testcase")]

    return TestRunInfo(properties, tests)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI: dump run properties and per-test summaries for each given log.
    if len(sys.argv) < 2:
        print("Usage:\n", os.path.basename(sys.argv[0]), "<log_name>.xml")
        exit(0)

    for arg in sys.argv[1:]:
        print("Processing {}...".format(arg))

        run = parseLogFile(arg)

        print("Properties:")

        for (prop_name, prop_value) in run.properties.items():
            print("\t{} = {}".format(prop_name, prop_value))

        print("Tests:")

        for t in sorted(run.tests):
            t.dump()

        print()
|
440
3rdparty/opencv-4.5.4/modules/ts/misc/trace_profiler.py
vendored
Normal file
440
3rdparty/opencv-4.5.4/modules/ts/misc/trace_profiler.py
vendored
Normal file
@ -0,0 +1,440 @@
|
||||
from __future__ import print_function
|
||||
|
||||
import os
|
||||
import sys
|
||||
import csv
|
||||
from pprint import pprint
|
||||
from collections import deque
|
||||
|
||||
# Make the py2 `long` name available on both interpreter generations.
try:
    long  # Python 2
except NameError:
    long = int  # Python 3
|
||||
|
||||
# trace.hpp
# Region implementation flags mirrored from OpenCV's trace.hpp.
REGION_FLAG_IMPL_MASK = 15 << 16
REGION_FLAG_IMPL_IPP = 1 << 16
REGION_FLAG_IMPL_OPENCL = 2 << 16

# Toggle verbose parser tracing.
DEBUG = False

if DEBUG:
    dprint = print
    dpprint = pprint
else:
    # Fixed: the stubs previously took a single positional (args, **kwargs),
    # so calls like dprint(a, b) or dprint() crashed even with DEBUG off.
    # Accept the same call shapes as print()/pprint().
    def dprint(*args, **kwargs):
        pass
    def dpprint(*args, **kwargs):
        pass
|
||||
|
||||
def tryNum(s):
    """Best-effort conversion of *s*: hex int, decimal int (long on py2),
    or the unchanged string when nothing parses."""
    converters = []
    if s.startswith('0x'):
        converters.append(lambda v: int(v, 16))
    converters.append(int)
    if sys.version_info[0] < 3:
        converters.append(long)
    for convert in converters:
        try:
            return convert(s)
        except ValueError:
            pass
    return s
|
||||
|
||||
def formatTimestamp(t):
    """Convert a microsecond timestamp to seconds with millisecond precision."""
    seconds = t * 1e-6
    return "%.3f" % seconds
|
||||
|
||||
try:
    from statistics import median
except ImportError:
    # Python < 3.4 fallback: midpoint of the sorted values.
    def median(values):
        ordered = sorted(values)
        count = len(values)
        mid = (count - 1) // 2
        if count % 2:
            return ordered[mid]
        return (ordered[mid] + ordered[mid + 1]) * 0.5
|
||||
|
||||
def getCXXFunctionName(spec):
    """Reduce a C++ function signature string to a short display name.

    Handles IPP#/OpenCL# prefixed region names specially, strips return
    types and parameter lists, keeps parameters for operators, and drops
    a leading '&' from function-pointer names.
    """
    def dropParams(spec):
        # Scan backwards, matching parentheses; split off the trailing
        # parameter list.  Recurses for '#'/':'-prefixed wrappers.
        pos = len(spec) - 1
        depth = 0
        while pos >= 0:
            if spec[pos] == ')':
                depth = depth + 1
            elif spec[pos] == '(':
                depth = depth - 1
                if depth == 0:
                    if pos == 0 or spec[pos - 1] in ['#', ':']:
                        res = dropParams(spec[pos+1:-1])
                        return (spec[:pos] + res[0], res[1])
                    return (spec[:pos], spec[pos:])
            pos = pos - 1
        return (spec, '')

    def extractName(spec):
        # The name is the last space-delimited alphanumeric token.
        pos = len(spec) - 1
        inName = False
        while pos >= 0:
            if spec[pos] == ' ':
                if inName:
                    return spec[pos+1:]
            elif spec[pos].isalnum():
                inName = True
            pos = pos - 1
        return spec

    if spec.startswith('IPP') or spec.startswith('OpenCL'):
        # Keep the implementation prefix (plus an optional '#'/':' separator)
        # and the first identifier that follows it.
        prefix_size = len('IPP') if spec.startswith('IPP') else len('OpenCL')
        prefix = spec[:prefix_size]
        if prefix_size < len(spec) and spec[prefix_size] in ['#', ':']:
            prefix = prefix + spec[prefix_size]
            prefix_size = prefix_size + 1
        begin = prefix_size
        while begin < len(spec):
            if spec[begin].isalnum() or spec[begin] in ['_', ':']:
                break
            begin = begin + 1
        if begin == len(spec):
            return spec
        end = begin
        while end < len(spec):
            if not (spec[end].isalnum() or spec[end] in ['_', ':']):
                break
            end = end + 1
        return prefix + spec[begin:end]

    spec = spec.replace(') const', ')') # const methods
    (ret_type_name, params) = dropParams(spec)
    name = extractName(ret_type_name)
    if 'operator' in name:
        # keep the parameter list so operator overloads stay distinguishable
        return name + params
    if name.startswith('&'):
        return name[1:]
    return name
|
||||
|
||||
stack_size = 10  # number of stack frames retained per trace entry
|
||||
|
||||
class Trace:
|
||||
def __init__(self, filename=None):
|
||||
self.tasks = {}
|
||||
self.tasks_list = []
|
||||
self.locations = {}
|
||||
self.threads_stack = {}
|
||||
self.pending_files = deque()
|
||||
if filename:
|
||||
self.load(filename)
|
||||
|
||||
class TraceTask:
|
||||
def __init__(self, threadID, taskID, locationID, beginTimestamp):
|
||||
self.threadID = threadID
|
||||
self.taskID = taskID
|
||||
self.locationID = locationID
|
||||
self.beginTimestamp = beginTimestamp
|
||||
self.endTimestamp = None
|
||||
self.parentTaskID = None
|
||||
self.parentThreadID = None
|
||||
self.childTask = []
|
||||
self.selfTimeIPP = 0
|
||||
self.selfTimeOpenCL = 0
|
||||
self.totalTimeIPP = 0
|
||||
self.totalTimeOpenCL = 0
|
||||
|
||||
def __repr__(self):
|
||||
return "TID={} ID={} loc={} parent={}:{} begin={} end={} IPP={}/{} OpenCL={}/{}".format(
|
||||
self.threadID, self.taskID, self.locationID, self.parentThreadID, self.parentTaskID,
|
||||
self.beginTimestamp, self.endTimestamp, self.totalTimeIPP, self.selfTimeIPP, self.totalTimeOpenCL, self.selfTimeOpenCL)
|
||||
|
||||
|
||||
class TraceLocation:
|
||||
def __init__(self, locationID, filename, line, name, flags):
|
||||
self.locationID = locationID
|
||||
self.filename = os.path.split(filename)[1]
|
||||
self.line = line
|
||||
self.name = getCXXFunctionName(name)
|
||||
self.flags = flags
|
||||
|
||||
def __str__(self):
|
||||
return "{}#{}:{}".format(self.name, self.filename, self.line)
|
||||
|
||||
def __repr__(self):
|
||||
return "ID={} {}:{}:{}".format(self.locationID, self.filename, self.line, self.name)
|
||||
|
||||
def parse_file(self, filename):
|
||||
dprint("Process file: '{}'".format(filename))
|
||||
with open(filename) as infile:
|
||||
for line in infile:
|
||||
line = str(line).strip()
|
||||
if line[0] == "#":
|
||||
if line.startswith("#thread file:"):
|
||||
name = str(line.split(':', 1)[1]).strip()
|
||||
self.pending_files.append(os.path.join(os.path.split(filename)[0], name))
|
||||
continue
|
||||
self.parse_line(line)
|
||||
|
||||
def parse_line(self, line):
|
||||
opts = line.split(',')
|
||||
dpprint(opts)
|
||||
if opts[0] == 'l':
|
||||
opts = list(csv.reader([line]))[0] # process quote more
|
||||
locationID = int(opts[1])
|
||||
filename = str(opts[2])
|
||||
line = int(opts[3])
|
||||
name = opts[4]
|
||||
flags = tryNum(opts[5])
|
||||
self.locations[locationID] = self.TraceLocation(locationID, filename, line, name, flags)
|
||||
return
|
||||
extra_opts = {}
|
||||
for e in opts[5:]:
|
||||
if not '=' in e:
|
||||
continue
|
||||
(k, v) = e.split('=')
|
||||
extra_opts[k] = tryNum(v)
|
||||
if extra_opts:
|
||||
dpprint(extra_opts)
|
||||
threadID = None
|
||||
taskID = None
|
||||
locationID = None
|
||||
ts = None
|
||||
if opts[0] in ['b', 'e']:
|
||||
threadID = int(opts[1])
|
||||
taskID = int(opts[4])
|
||||
locationID = int(opts[3])
|
||||
ts = tryNum(opts[2])
|
||||
thread_stack = None
|
||||
currentTask = (None, None)
|
||||
if threadID is not None:
|
||||
if not threadID in self.threads_stack:
|
||||
thread_stack = deque()
|
||||
self.threads_stack[threadID] = thread_stack
|
||||
else:
|
||||
thread_stack = self.threads_stack[threadID]
|
||||
currentTask = None if not thread_stack else thread_stack[-1]
|
||||
t = (threadID, taskID)
|
||||
if opts[0] == 'b':
|
||||
assert not t in self.tasks, "Duplicate task: " + str(t) + repr(self.tasks[t])
|
||||
task = self.TraceTask(threadID, taskID, locationID, ts)
|
||||
self.tasks[t] = task
|
||||
self.tasks_list.append(task)
|
||||
thread_stack.append((threadID, taskID))
|
||||
if currentTask:
|
||||
task.parentThreadID = currentTask[0]
|
||||
task.parentTaskID = currentTask[1]
|
||||
if 'parentThread' in extra_opts:
|
||||
task.parentThreadID = extra_opts['parentThread']
|
||||
if 'parent' in extra_opts:
|
||||
task.parentTaskID = extra_opts['parent']
|
||||
if opts[0] == 'e':
|
||||
task = self.tasks[t]
|
||||
task.endTimestamp = ts
|
||||
if 'tIPP' in extra_opts:
|
||||
task.selfTimeIPP = extra_opts['tIPP']
|
||||
if 'tOCL' in extra_opts:
|
||||
task.selfTimeOpenCL = extra_opts['tOCL']
|
||||
thread_stack.pop()
|
||||
|
||||
def load(self, filename):
    """Queue *filename* for parsing and drain the pending-file queue.

    parse_file() may enqueue additional files (follow-up trace chunks), so
    the queue is drained until empty rather than parsing just one file.
    """
    self.pending_files.append(filename)
    if DEBUG:
        # Echo the raw trace file to stdout when debugging is enabled.
        with open(filename, 'r') as tracefile:
            print(tracefile.read(), end='')
    while self.pending_files:
        next_file = self.pending_files.pop()
        self.parse_file(next_file)
def getParentTask(self, task):
    """Return the task that spawned *task*, or None if it has no parent."""
    parent_key = (task.parentThreadID, task.parentTaskID)
    return self.tasks.get(parent_key)
def process(self):
    """Post-process the parsed trace.

    Sorts tasks chronologically, computes per-task durations, then walks each
    task's ancestor chain to split time into "self" and "total" buckets --
    including the IPP- and OpenCL-specific time counters. A second pass
    redistributes the time of 'parallel_for' regions (whose children run on
    worker threads) to their parents, scaled to wall-clock time.
    """
    # Chronological order makes the parent/child accounting below deterministic.
    self.tasks_list.sort(key=lambda x: x.beginTimestamp)

    # Locate the 'parallel_for' region's location ID, if present in the trace;
    # tasks at that location get special treatment in the second pass.
    parallel_for_location = None
    for (id, l) in self.locations.items():
        if l.name == 'parallel_for':
            parallel_for_location = l.locationID
            break

    # Initialize durations. A task without an end timestamp (truncated trace)
    # raises here and ends up with duration = None.
    for task in self.tasks_list:
        try:
            task.duration = task.endTimestamp - task.beginTimestamp
            task.selfDuration = task.duration
        except:  # endTimestamp may be missing -- leave durations undefined
            task.duration = None
            task.selfDuration = None
        task.totalTimeIPP = task.selfTimeIPP
        task.totalTimeOpenCL = task.selfTimeOpenCL

    dpprint(self.tasks)
    dprint("Calculate total times")

    for task in self.tasks_list:
        parentTask = self.getParentTask(task)
        if parentTask:
            # A child's wall time is excluded from the parent's self time.
            parentTask.selfDuration = parentTask.selfDuration - task.duration
            parentTask.childTask.append(task)
        timeIPP = task.selfTimeIPP
        timeOpenCL = task.selfTimeOpenCL
        # Walk up the ancestor chain. Implementation-specific (IPP/OpenCL)
        # time is folded into the self time of the nearest ancestor of that
        # same implementation, and added to totals of the others in between.
        while parentTask:
            if parentTask.locationID == parallel_for_location:  # TODO parallel_for
                break
            parentLocation = self.locations[parentTask.locationID]
            if (parentLocation.flags & REGION_FLAG_IMPL_MASK) == REGION_FLAG_IMPL_IPP:
                # Ancestor is itself an IPP region: absorb the time and stop
                # propagating the IPP share further up.
                parentTask.selfTimeIPP = parentTask.selfTimeIPP - timeIPP
                timeIPP = 0
            else:
                parentTask.totalTimeIPP = parentTask.totalTimeIPP + timeIPP
            if (parentLocation.flags & REGION_FLAG_IMPL_MASK) == REGION_FLAG_IMPL_OPENCL:
                parentTask.selfTimeOpenCL = parentTask.selfTimeOpenCL - timeOpenCL
                timeOpenCL = 0
            else:
                parentTask.totalTimeOpenCL = parentTask.totalTimeOpenCL + timeOpenCL
            parentTask = self.getParentTask(parentTask)

    dpprint(self.tasks)
    dprint("Calculate total times (parallel_for)")

    # Second pass: a parallel_for task's children run concurrently on worker
    # threads, so their summed time generally differs from the region's wall
    # time. Zero the region's self time and push the children's accumulated
    # IPP/OpenCL time up the chain, scaled by wall-time / summed-child-time.
    for task in self.tasks_list:
        if task.locationID == parallel_for_location:
            task.selfDuration = 0
            childDuration = sum([t.duration for t in task.childTask])
            if task.duration == 0 or childDuration == 0:
                continue
            # Scale factor from summed child time to observed wall time.
            timeCoef = task.duration / float(childDuration)
            childTimeIPP = sum([t.totalTimeIPP for t in task.childTask])
            childTimeOpenCL = sum([t.totalTimeOpenCL for t in task.childTask])
            if childTimeIPP == 0 and childTimeOpenCL == 0:
                continue
            timeIPP = childTimeIPP * timeCoef
            timeOpenCL = childTimeOpenCL * timeCoef
            parentTask = task
            # Same attribution walk as in the first pass, but starting at the
            # parallel_for task itself (so it absorbs/forwards its own share).
            while parentTask:
                parentLocation = self.locations[parentTask.locationID]
                if (parentLocation.flags & REGION_FLAG_IMPL_MASK) == REGION_FLAG_IMPL_IPP:
                    parentTask.selfTimeIPP = parentTask.selfTimeIPP - timeIPP
                    timeIPP = 0
                else:
                    parentTask.totalTimeIPP = parentTask.totalTimeIPP + timeIPP
                if (parentLocation.flags & REGION_FLAG_IMPL_MASK) == REGION_FLAG_IMPL_OPENCL:
                    parentTask.selfTimeOpenCL = parentTask.selfTimeOpenCL - timeOpenCL
                    timeOpenCL = 0
                else:
                    parentTask.totalTimeOpenCL = parentTask.totalTimeOpenCL + timeOpenCL
                parentTask = self.getParentTask(parentTask)

    dpprint(self.tasks)
    dprint("Done")
def dump(self, max_entries):
    """Print a table of the most expensive call stacks.

    Tasks are grouped by their call stack -- a tuple of location IDs starting
    at the task's own location and walking up its parents. For each group the
    table shows self-time statistics on one row and total-time statistics on
    the next, including IPP/OpenCL time shares. At most max_entries groups
    are printed (a value <= 0 disables the limit); groups are ordered by
    summed self time, descending.
    """
    assert isinstance(max_entries, int)

    # Per-call-stack aggregation record.
    class CallInfo():
        def __init__(self, callID):
            self.callID = callID        # tuple of location IDs, innermost first
            self.totalTimes = []        # wall-clock durations of each occurrence
            self.selfTimes = []         # durations minus child time
            self.threads = set()        # distinct thread IDs seen for this stack
            self.selfTimesIPP = []
            self.selfTimesOpenCL = []
            self.totalTimesIPP = []
            self.totalTimesOpenCL = []

    calls = {}

    # Group tasks by call stack, truncated to at most stack_size frames.
    # NOTE(review): stack_size is a module-level constant defined outside
    # this chunk -- confirm its value in the file header.
    for currentTask in self.tasks_list:
        task = currentTask
        callID = []
        for i in range(stack_size):
            callID.append(task.locationID)
            task = self.getParentTask(task)
            if not task:
                break
        callID = tuple(callID)
        if not callID in calls:
            call = CallInfo(callID)
            calls[callID] = call
        else:
            call = calls[callID]
        call.totalTimes.append(currentTask.duration)
        call.selfTimes.append(currentTask.selfDuration)
        call.threads.add(currentTask.threadID)
        call.selfTimesIPP.append(currentTask.selfTimeIPP)
        call.selfTimesOpenCL.append(currentTask.selfTimeOpenCL)
        call.totalTimesIPP.append(currentTask.totalTimeIPP)
        call.totalTimesOpenCL.append(currentTask.totalTimeOpenCL)

    dpprint(self.tasks)
    dpprint(self.locations)
    dpprint(calls)

    # Aggregate per-stack statistics; sort by summed self time, descending.
    calls_self_sum = {k: sum(v.selfTimes) for (k, v) in calls.items()}
    calls_total_sum = {k: sum(v.totalTimes) for (k, v) in calls.items()}
    calls_median = {k: median(v.selfTimes) for (k, v) in calls.items()}
    calls_sorted = sorted(calls.keys(), key=lambda x: calls_self_sum[x], reverse=True)

    calls_self_sum_IPP = {k: sum(v.selfTimesIPP) for (k, v) in calls.items()}
    calls_total_sum_IPP = {k: sum(v.totalTimesIPP) for (k, v) in calls.items()}

    calls_self_sum_OpenCL = {k: sum(v.selfTimesOpenCL) for (k, v) in calls.items()}
    calls_total_sum_OpenCL = {k: sum(v.totalTimesOpenCL) for (k, v) in calls.items()}

    if max_entries > 0 and len(calls_sorted) > max_entries:
        calls_sorted = calls_sorted[:max_entries]

    def formatPercents(p):
        # Render a 0..1 fraction as a right-aligned integer percentage.
        if p is not None:
            return "{:>3d}".format(int(p*100))
        return ''

    name_width = 70
    timestamp_width = 12
    def fmtTS():
        # Right-aligned field wide enough for a formatted timestamp.
        return '{:>' + str(timestamp_width) + '}'
    fmt = "{:>3} {:<"+str(name_width)+"} {:>8} {:>3}"+((' '+fmtTS())*5)+((' '+fmtTS()+' {:>3}')*2)
    fmt2 = "{:>3} {:<"+str(name_width)+"} {:>8} {:>3}"+((' '+fmtTS())*5)+((' '+fmtTS()+' {:>3}')*2)
    print(fmt.format("ID", "name", "count", "thr", "min", "max", "median", "avg", "*self*", "IPP", "%", "OpenCL", "%"))
    print(fmt2.format("", "", "", "", "t-min", "t-max", "t-median", "t-avg", "total", "t-IPP", "%", "t-OpenCL", "%"))
    # Two rows per call stack: self-time statistics, then total-time statistics.
    for (index, callID) in enumerate(calls_sorted):
        call_self_times = calls[callID].selfTimes
        loc0 = self.locations[callID[0]]
        # Human-readable stack: full description for the innermost frame,
        # just the region name for the ancestors.
        loc_array = [] # [str(callID)]
        for (i, l) in enumerate(callID):
            loc = self.locations[l]
            loc_array.append(loc.name if i > 0 else str(loc))
        loc_str = '|'.join(loc_array)
        if len(loc_str) > name_width: loc_str = loc_str[:name_width-3]+'...'
        print(fmt.format(index + 1, loc_str, len(call_self_times),
            len(calls[callID].threads),
            formatTimestamp(min(call_self_times)),
            formatTimestamp(max(call_self_times)),
            formatTimestamp(calls_median[callID]),
            formatTimestamp(sum(call_self_times)/float(len(call_self_times))),
            formatTimestamp(sum(call_self_times)),
            formatTimestamp(calls_self_sum_IPP[callID]),
            formatPercents(calls_self_sum_IPP[callID] / float(calls_self_sum[callID])) if calls_self_sum[callID] > 0 else formatPercents(None),
            formatTimestamp(calls_self_sum_OpenCL[callID]),
            formatPercents(calls_self_sum_OpenCL[callID] / float(calls_self_sum[callID])) if calls_self_sum[callID] > 0 else formatPercents(None),
            ))
        call_total_times = calls[callID].totalTimes
        print(fmt2.format("", "", "", "",
            formatTimestamp(min(call_total_times)),
            formatTimestamp(max(call_total_times)),
            formatTimestamp(median(call_total_times)),
            formatTimestamp(sum(call_total_times)/float(len(call_total_times))),
            formatTimestamp(sum(call_total_times)),
            formatTimestamp(calls_total_sum_IPP[callID]),
            formatPercents(calls_total_sum_IPP[callID] / float(calls_total_sum[callID])) if calls_total_sum[callID] > 0 else formatPercents(None),
            formatTimestamp(calls_total_sum_OpenCL[callID]),
            formatPercents(calls_total_sum_OpenCL[callID] / float(calls_total_sum[callID])) if calls_total_sum[callID] > 0 else formatPercents(None),
            ))
        print()
if __name__ == "__main__":
    # Usage: trace_profiler.py [trace_file] [max_entries]
    argv = sys.argv
    if len(argv) > 1:
        tracefile = argv[1]
    else:
        tracefile = 'OpenCVTrace.txt'
    if len(argv) > 2:
        count = int(argv[2])
    else:
        count = 10
    trace = Trace(tracefile)
    trace.process()
    trace.dump(max_entries=count)
    print("OK")
374
3rdparty/opencv-4.5.4/modules/ts/misc/xls-report.py
vendored
Executable file
374
3rdparty/opencv-4.5.4/modules/ts/misc/xls-report.py
vendored
Executable file
@ -0,0 +1,374 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
This script can generate XLS reports from OpenCV tests' XML output files.
|
||||
|
||||
To use it, first, create a directory for each machine you ran tests on.
|
||||
Each such directory will become a sheet in the report. Put each XML file
|
||||
into the corresponding directory.
|
||||
|
||||
Then, create your configuration file(s). You can have a global configuration
|
||||
file (specified with the -c option), and per-sheet configuration files, which
|
||||
must be called sheet.conf and placed in the directory corresponding to the sheet.
|
||||
The settings in the per-sheet configuration file will override those in the
|
||||
global configuration file, if both are present.
|
||||
|
||||
A configuration file must consist of a Python dictionary. The following keys
|
||||
will be recognized:
|
||||
|
||||
* 'comparisons': [{'from': string, 'to': string}]
|
||||
List of configurations to compare performance between. For each item,
the sheet will have a column showing the speedup from the configuration
named 'from' to the configuration named 'to'.
|
||||
|
||||
* 'configuration_matchers': [{'properties': {string: object}, 'name': string}]
|
||||
Instructions for matching test run property sets to configuration names.
|
||||
|
||||
For each found XML file:
|
||||
|
||||
1) All attributes of the root element starting with the prefix 'cv_' are
|
||||
placed in a dictionary, with the cv_ prefix stripped and the cv_module_name
|
||||
element deleted.
|
||||
|
||||
2) The first matcher for which the XML's file property set contains the same
|
||||
keys with equal values as its 'properties' dictionary is searched for.
|
||||
A missing property can be matched by using None as the value.
|
||||
|
||||
Corollary 1: you should place more specific matchers before less specific
|
||||
ones.
|
||||
|
||||
Corollary 2: an empty 'properties' dictionary matches every property set.
|
||||
|
||||
3) If a matching matcher is found, its 'name' string is presumed to be the name
|
||||
of the configuration the XML file corresponds to. A warning is printed if
|
||||
two different property sets match to the same configuration name.
|
||||
|
||||
4) If such a matcher isn't found, then if --include-unmatched was specified, the
configuration name is assumed to be the relative path from the sheet's
directory to the XML file's containing directory. If the XML file is
directly inside the sheet's directory, the configuration name is instead
a dump of all its properties. If --include-unmatched wasn't specified,
|
||||
the XML file is ignored and a warning is printed.
|
||||
|
||||
* 'configurations': [string]
|
||||
List of names for compile-time and runtime configurations of OpenCV.
|
||||
Each item will correspond to a column of the sheet.
|
||||
|
||||
* 'module_colors': {string: string}
|
||||
Mapping from module name to color name. In the sheet, cells containing module
|
||||
names from this mapping will be colored with the corresponding color. You can
|
||||
find the list of available colors here:
|
||||
<http://www.simplistix.co.uk/presentations/python-excel.pdf>.
|
||||
|
||||
* 'sheet_name': string
|
||||
Name for the sheet. If this parameter is missing, the name of sheet's directory
|
||||
will be used.
|
||||
|
||||
* 'sheet_properties': [(string, string)]
|
||||
List of arbitrary (key, value) pairs that somehow describe the sheet. Will be
|
||||
dumped into the first row of the sheet in string form.
|
||||
|
||||
Note that all keys are optional, although to get useful results, you'll want to
|
||||
specify at least 'configurations' and 'configuration_matchers'.
|
||||
|
||||
Finally, run the script. Use the --help option for usage information.
|
||||
"""
|
||||
|
||||
from __future__ import division
|
||||
|
||||
import ast
|
||||
import errno
|
||||
import fnmatch
|
||||
import logging
|
||||
import numbers
|
||||
import os, os.path
|
||||
import re
|
||||
|
||||
from argparse import ArgumentParser
|
||||
from glob import glob
|
||||
from itertools import ifilter
|
||||
|
||||
import xlwt
|
||||
|
||||
from testlog_parser import parseLogFile
|
||||
|
||||
# Matches an image-size test parameter such as '640x480'.
re_image_size = re.compile(r'^ \d+ x \d+$', re.VERBOSE)
# Matches an OpenCV element-type parameter such as '8UC3'.
re_data_type = re.compile(r'^ (?: 8 | 16 | 32 | 64 ) [USF] C [1234] $', re.VERBOSE)

# Cell styles for result cells: numeric time, missing result, and failures.
time_style = xlwt.easyxf(num_format_str='#0.00')
no_time_style = xlwt.easyxf('pattern: pattern solid, fore_color gray25')
failed_style = xlwt.easyxf('pattern: pattern solid, fore_color red')
noimpl_style = xlwt.easyxf('pattern: pattern solid, fore_color orange')
# Status string -> style; anything else falls back to time_style.
style_dict = {"failed": failed_style, "noimpl":noimpl_style}

# Styles for the speedup (comparison) columns: green/red for clear
# improvements/regressions, orange for arithmetic errors.
speedup_style = time_style
good_speedup_style = xlwt.easyxf('font: color green', num_format_str='#0.00')
bad_speedup_style = xlwt.easyxf('font: color red', num_format_str='#0.00')
no_speedup_style = no_time_style
error_speedup_style = xlwt.easyxf('pattern: pattern solid, fore_color orange')
# Styles for the two header rows.
header_style = xlwt.easyxf('font: bold true; alignment: horizontal centre, vertical top, wrap True')
subheader_style = xlwt.easyxf('alignment: horizontal centre, vertical top')
class Collector(object):
    """Accumulates perf-test results from XML log files.

    Results are grouped by module name, then keyed by (test short name,
    parameter string), with one entry per configuration name. Property sets
    are mapped to configuration names via config_match_func; unmatched files
    are either skipped or given a synthesized configuration name, depending
    on include_unmatched.

    Fix over the original: dict.iteritems() (Python 2 only) replaced with
    dict.items(), which behaves identically here on both Python 2 and 3.
    """

    def __init__(self, config_match_func, include_unmatched):
        # Maps a property set (sorted tuple of (key, value) pairs) to the
        # configuration name it resolved to (possibly None).
        self.__config_cache = {}
        self.config_match_func = config_match_func
        self.include_unmatched = include_unmatched
        # module -> {(test short name, param) -> {configuration -> result}}
        self.tests = {}
        # Configuration names synthesized for unmatched XML files.
        self.extra_configurations = set()

    # Format a sorted sequence of pairs as if it was a dictionary.
    # We can't just use a dictionary instead, since we want to preserve the sorted order of the keys.
    @staticmethod
    def __format_config_cache_key(pairs, multiline=False):
        return (
            ('{\n' if multiline else '{') +
            (',\n' if multiline else ', ').join(
                (' ' if multiline else '') + repr(k) + ': ' + repr(v) for (k, v) in pairs) +
            ('\n}\n' if multiline else '}')
        )

    def collect_from(self, xml_path, default_configuration):
        """Parse one XML log and merge its test results into self.tests.

        default_configuration is used (when include_unmatched is set) for
        files whose property set doesn't match any configured matcher.
        """
        run = parseLogFile(xml_path)

        module = run.properties['module_name']

        properties = run.properties.copy()
        del properties['module_name']

        props_key = tuple(sorted(properties.items()))  # dicts can't be keys

        if props_key in self.__config_cache:
            configuration = self.__config_cache[props_key]
        else:
            configuration = self.config_match_func(properties)

            if configuration is None:
                if self.include_unmatched:
                    if default_configuration is not None:
                        configuration = default_configuration
                    else:
                        # No usable directory name: use the property dump itself.
                        configuration = Collector.__format_config_cache_key(props_key, multiline=True)

                    self.extra_configurations.add(configuration)
                else:
                    logging.warning('failed to match properties to a configuration: %s',
                                    Collector.__format_config_cache_key(props_key))

            else:
                # Warn when two distinct property sets collapse to one name.
                same_config_props = [it[0] for it in self.__config_cache.items() if it[1] == configuration]
                if len(same_config_props) > 0:
                    logging.warning('property set %s matches the same configuration %r as property set %s',
                                    Collector.__format_config_cache_key(props_key),
                                    configuration,
                                    Collector.__format_config_cache_key(same_config_props[0]))

            self.__config_cache[props_key] = configuration

        if configuration is None: return

        module_tests = self.tests.setdefault(module, {})

        for test in run.tests:
            test_results = module_tests.setdefault((test.shortName(), test.param()), {})
            new_result = test.get("gmean") if test.status == 'run' else test.status
            test_results[configuration] = min(
                test_results.get(configuration), new_result,
                key=lambda r: (1, r) if isinstance(r, numbers.Number) else
                              (2,) if r is not None else
                              (3,)
            )  # prefer lower result; prefer numbers to errors and errors to nothing
def make_match_func(matchers):
    """Build a function that maps a property dict to a configuration name.

    The returned function scans *matchers* in order and returns the 'name'
    of the first matcher all of whose 'properties' entries compare equal to
    the corresponding entries of the given property dict (a value of None
    matches a missing property). Returns None when no matcher applies.

    Fix over the original: dict.iteritems() (Python 2 only) replaced with
    dict.items(), which behaves identically here on both Python 2 and 3.
    """
    def match_func(properties):
        for matcher in matchers:
            if all(properties.get(name) == value
                   for (name, value) in matcher['properties'].items()):
                return matcher['name']

        return None

    return match_func
def main():
    """Entry point: build an XLS performance report from perf-test XML logs.

    Each directory given on the command line becomes one sheet in the output
    workbook; see the module docstring for the configuration-file format.

    NOTE(review): this function relies on dict.iteritems() and the
    module-level 'ifilter' import (itertools.ifilter), both of which exist
    only under Python 2 -- this script cannot run under Python 3 as-is.
    """
    arg_parser = ArgumentParser(description='Build an XLS performance report.')
    arg_parser.add_argument('sheet_dirs', nargs='+', metavar='DIR', help='directory containing perf test logs')
    arg_parser.add_argument('-o', '--output', metavar='XLS', default='report.xls', help='name of output file')
    arg_parser.add_argument('-c', '--config', metavar='CONF', help='global configuration file')
    arg_parser.add_argument('--include-unmatched', action='store_true',
        help='include results from XML files that were not recognized by configuration matchers')
    arg_parser.add_argument('--show-times-per-pixel', action='store_true',
        help='for tests that have an image size parameter, show per-pixel time, as well as total time')

    args = arg_parser.parse_args()

    logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)

    # Global configuration is a Python-literal dict (see module docstring).
    if args.config is not None:
        with open(args.config) as global_conf_file:
            global_conf = ast.literal_eval(global_conf_file.read())
    else:
        global_conf = {}

    wb = xlwt.Workbook()

    for sheet_path in args.sheet_dirs:
        # Optional per-sheet configuration; a missing sheet.conf is fine,
        # any other I/O error is re-raised.
        try:
            with open(os.path.join(sheet_path, 'sheet.conf')) as sheet_conf_file:
                sheet_conf = ast.literal_eval(sheet_conf_file.read())
        except IOError as ioe:
            if ioe.errno != errno.ENOENT: raise
            sheet_conf = {}
            logging.debug('no sheet.conf for %s', sheet_path)

        # Per-sheet settings override global ones (Python 2: items() lists).
        sheet_conf = dict(global_conf.items() + sheet_conf.items())

        config_names = sheet_conf.get('configurations', [])
        config_matchers = sheet_conf.get('configuration_matchers', [])

        collector = Collector(make_match_func(config_matchers), args.include_unmatched)

        # Collect results from every XML file under the sheet directory.
        # Files in subdirectories get the subdirectory path as their default
        # configuration name; files directly in sheet_path get none.
        for root, _, filenames in os.walk(sheet_path):
            logging.info('looking in %s', root)
            for filename in fnmatch.filter(filenames, '*.xml'):
                if os.path.normpath(sheet_path) == os.path.normpath(root):
                    default_conf = None
                else:
                    default_conf = os.path.relpath(root, sheet_path)
                collector.collect_from(os.path.join(root, filename), default_conf)

        # Append any synthesized configuration names after the declared ones.
        config_names.extend(sorted(collector.extra_configurations - set(config_names)))

        sheet = wb.add_sheet(sheet_conf.get('sheet_name', os.path.basename(os.path.abspath(sheet_path))))

        sheet_properties = sheet_conf.get('sheet_properties', [])

        # Row 0: free-form sheet properties; rows from 'row' on: the table.
        sheet.write(0, 0, 'Properties:')

        sheet.write(0, 1,
            'N/A' if len(sheet_properties) == 0 else
            ' '.join(str(k) + '=' + repr(v) for (k, v) in sheet_properties))

        sheet.row(2).height = 800
        sheet.panes_frozen = True
        sheet.remove_splits = True

        sheet_comparisons = sheet_conf.get('comparisons', [])

        row = 2

        col = 0

        # Fixed leading columns (module/test/size/type/params) with widths.
        for (w, caption) in [
                (2500, 'Module'),
                (10000, 'Test'),
                (2000, 'Image\nwidth'),
                (2000, 'Image\nheight'),
                (2000, 'Data\ntype'),
                (7500, 'Other parameters')]:
            sheet.col(col).width = w
            if args.show_times_per_pixel:
                # Two header rows are used in this mode; span both.
                sheet.write_merge(row, row + 1, col, col, caption, header_style)
            else:
                sheet.write(row, col, caption, header_style)
            col += 1

        # One (or two, with per-pixel times) columns per configuration.
        for config_name in config_names:
            if args.show_times_per_pixel:
                sheet.col(col).width = 3000
                sheet.col(col + 1).width = 3000
                sheet.write_merge(row, row, col, col + 1, config_name, header_style)
                sheet.write(row + 1, col, 'total, ms', subheader_style)
                sheet.write(row + 1, col + 1, 'per pixel, ns', subheader_style)
                col += 2
            else:
                sheet.col(col).width = 4000
                sheet.write(row, col, config_name, header_style)
                col += 1

        col += 1 # blank column between configurations and comparisons

        for comp in sheet_comparisons:
            sheet.col(col).width = 4000
            caption = comp['to'] + '\nvs\n' + comp['from']
            if args.show_times_per_pixel:
                sheet.write_merge(row, row + 1, col, col, caption, header_style)
            else:
                sheet.write(row, col, caption, header_style)
            col += 1

        row += 2 if args.show_times_per_pixel else 1

        # Freeze the header rows.
        sheet.horz_split_pos = row
        sheet.horz_split_first_visible = row

        module_colors = sheet_conf.get('module_colors', {})
        module_styles = {module: xlwt.easyxf('pattern: pattern solid, fore_color {}'.format(color))
                         for module, color in module_colors.iteritems()}

        # Data rows: one per (test, param) pair, modules sorted alphabetically.
        for module, tests in sorted(collector.tests.iteritems()):
            for ((test, param), configs) in sorted(tests.iteritems()):
                sheet.write(row, 0, module, module_styles.get(module, xlwt.Style.default_style))
                sheet.write(row, 1, test)

                # Split '(a, b, c)'-style parameter strings into components.
                param_list = param[1:-1].split(', ') if param.startswith('(') and param.endswith(')') else [param]

                # Pull out the image-size component, if any, into its own columns.
                image_size = next(ifilter(re_image_size.match, param_list), None)
                if image_size is not None:
                    (image_width, image_height) = map(int, image_size.split('x', 1))
                    sheet.write(row, 2, image_width)
                    sheet.write(row, 3, image_height)
                    del param_list[param_list.index(image_size)]

                # Likewise for the data-type component (e.g. '8UC3').
                data_type = next(ifilter(re_data_type.match, param_list), None)
                if data_type is not None:
                    sheet.write(row, 4, data_type)
                    del param_list[param_list.index(data_type)]

                sheet.row(row).write(5, ' | '.join(param_list))

                col = 6

                # Result cells, one (or two) per configuration.
                for c in config_names:
                    if c in configs:
                        sheet.write(row, col, configs[c], style_dict.get(configs[c], time_style))
                    else:
                        sheet.write(row, col, None, no_time_style)
                    col += 1
                    if args.show_times_per_pixel:
                        # ns/pixel = ms * 1e6 / (width * height), computed as
                        # a spreadsheet formula so it tracks cell edits.
                        sheet.write(row, col,
                            xlwt.Formula('{0} * 1000000 / ({1} * {2})'.format(
                                xlwt.Utils.rowcol_to_cell(row, col - 1),
                                xlwt.Utils.rowcol_to_cell(row, 2),
                                xlwt.Utils.rowcol_to_cell(row, 3)
                            )),
                            time_style
                        )
                        col += 1

                col += 1 # blank column

                # Speedup columns: 'from' time divided by 'to' time.
                for comp in sheet_comparisons:
                    cmp_from = configs.get(comp["from"])
                    cmp_to = configs.get(comp["to"])

                    if isinstance(cmp_from, numbers.Number) and isinstance(cmp_to, numbers.Number):
                        try:
                            speedup = cmp_from / cmp_to
                            sheet.write(row, col, speedup, good_speedup_style if speedup > 1.1 else
                                bad_speedup_style if speedup < 0.9 else
                                speedup_style)
                        except ArithmeticError as e:
                            # Division by zero: mark the cell as an error.
                            sheet.write(row, col, None, error_speedup_style)
                    else:
                        sheet.write(row, col, None, no_speedup_style)

                    col += 1

                row += 1
                if row % 1000 == 0: sheet.flush_row_data()

    wb.save(args.output)
# Run the report builder only when executed as a script.
if __name__ == '__main__':
    main()
Reference in New Issue
Block a user