[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[freetype2] GSoC-2023-Ahmet 210471bcc: clean whitespace
From: Werner Lemberg
Subject: [freetype2] GSoC-2023-Ahmet 210471bcc: clean whitespace
Date: Mon, 18 Sep 2023 07:47:22 -0400 (EDT)
branch: GSoC-2023-Ahmet
commit 210471bcc6cd355a031690815ed622211ce00f29
Author: goksu <25721443+goeksu@users.noreply.github.com>
Commit: goksu <25721443+goeksu@users.noreply.github.com>
clean whitespace
---
builds/testing.mk | 12 ++-
src/tools/ftbench/ftbench.c | 28 +++----
src/tools/ftbench/src/tohtml.py | 174 +++++++++++++++++++++-------------------
3 files changed, 110 insertions(+), 104 deletions(-)
diff --git a/builds/testing.mk b/builds/testing.mk
index c3771cba5..1a99b714d 100644
--- a/builds/testing.mk
+++ b/builds/testing.mk
@@ -6,7 +6,6 @@ FTBENCH_BIN = $(OBJ_DIR)/bench$E
INCLUDES = $(TOP_DIR)/include
FONTS = $(wildcard $(FTBENCH_DIR)/fonts/*.ttf)
-
# Define objects.
BASELINE_DIR = $(OBJ_DIR)/baseline/
BENCHMARK_DIR = $(OBJ_DIR)/benchmark/
@@ -17,8 +16,7 @@ HTMLCREATOR = $(OBJ_DIR)/tohtml.py
HTMLFILE = $(OBJ_DIR)/benchmark.html
# Define flags by default
-FTBENCH_FLAG ?= -c 1000 -w 100
-
+FTBENCH_FLAG ?= -c 100 -w 10
# Define test fonts all in the fonts folder.
BASELINE = $(addprefix $(BASELINE_DIR), $(notdir $(FONTS:.ttf=.txt)))
@@ -26,7 +24,7 @@ BENCHMARK = $(addprefix $(BENCHMARK_DIR), $(notdir
$(FONTS:.ttf=.txt)))
FT_INCLUDES := $(OBJ_BUILD) \
- $(INCLUDES)
+ $(INCLUDES)
COMPILE = $(CC) $(ANSIFLAGS) \
$(INCLUDES:%=$I%) \
@@ -49,7 +47,7 @@ ifeq ($(PLATFORM),unix)
# link the FreeType library.
LINK_CMD = $(LIBTOOL) --mode=link $(CCraw) \
$(subst /,$(COMPILER_SEP),$(LDFLAGS))
- LINK_LIBS = $(subst /,$(COMPILER_SEP),$(FTLIB) $(EFENCE))
+ LINK_LIBS = $(subst /,$(COMPILER_SEP),$(FTLIB) $(EFENCE))
else
LINK_CMD = $(CC) $(subst /,$(COMPILER_SEP),$(LDFLAGS))
ifeq ($(PLATFORM),unixdev)
@@ -90,12 +88,12 @@ $(BASELINE_DIR) $(BENCHMARK_DIR):
@mkdir -p $@
# Create ftbench object
-$(FTBENCH_OBJ): $(FTBENCH_SRC)
+$(FTBENCH_OBJ): $(FTBENCH_SRC)
@$(COMPILE) $T$(subst /,$(COMPILER_SEP),$@ $<) $(EXTRAFLAGS)
@echo "Object created."
# Build ftbench
-$(FTBENCH_BIN): $(FTBENCH_OBJ)
+$(FTBENCH_BIN): $(FTBENCH_OBJ)
@echo "Linking ftbench..."
@$(LINK_CMD) $T$(subst /,$(COMPILER_SEP),$@ $<) $(LINK_LIBS)
@echo "Built."
diff --git a/src/tools/ftbench/ftbench.c b/src/tools/ftbench/ftbench.c
index 93312e3a4..cdda32c5c 100644
--- a/src/tools/ftbench/ftbench.c
+++ b/src/tools/ftbench/ftbench.c
@@ -281,14 +281,14 @@ static void benchmark(FT_Face face, btest_t* test, int
max_iter, double max_time
double medians[NUM_CHUNKS];
// Cache
- if (test->cache_first) {
- test->bench(face, test->user_data);
+ if (test->cache_first) {
+ test->bench(face, test->user_data);
}
// Warmup
TIMER_START(&timer);
- for(int i = 0; i<warmup; i++)
- test->bench(face, test->user_data);
+ for(int i = 0; i<warmup; i++)
+ test->bench(face, test->user_data);
TIMER_STOP(&timer);
printf(" %-25s ", test->title);
@@ -299,22 +299,22 @@ static void benchmark(FT_Face face, btest_t* test, int
max_iter, double max_time
TIMER_START(&timer);
// Execute a chunk of iterations
- for (n = 0, done = 0; n < CHUNK_SIZE; n++) {
+ for (n = 0, done = 0; n < CHUNK_SIZE; n++) {
done += test->bench(face, test->user_data);
}
TIMER_STOP(&timer);
medians[chunk] = TIMER_GET(&timer);
-
-
+
+
total_time += medians[chunk];
total_done += done;
// Check max_time for each iteration, break if exceeded
if (total_time > 1E6 * max_time)
break;
-
+
}
-
+
qsort(medians, NUM_CHUNKS, sizeof(double), compare);
double final_median;
if (NUM_CHUNKS % 2 == 0) {
@@ -322,7 +322,7 @@ static void benchmark(FT_Face face, btest_t* test, int
max_iter, double max_time
} else {
final_median = medians[NUM_CHUNKS / 2];
}
-
+
printf("%10.1f microseconds %10d done\n", final_median/CHUNK_SIZE,
total_done);
}
@@ -512,7 +512,7 @@ static void benchmark(FT_Face face, btest_t* test, int
max_iter, double max_time
continue;
FT_Glyph_Get_CBox( glyph, FT_GLYPH_BBOX_PIXELS, &bbox );
-
+
FT_Done_Glyph( glyph );
done++;
}
@@ -1307,9 +1307,9 @@ static void benchmark(FT_Face face, btest_t* test, int
max_iter, double max_time
if ( warmup_iter < 0 )
warmup_iter = -warmup_iter;
break;
-
-
-
+
+
+
/* break; */
default:
diff --git a/src/tools/ftbench/src/tohtml.py b/src/tools/ftbench/src/tohtml.py
index 32c86fb8d..c1e50c209 100644
--- a/src/tools/ftbench/src/tohtml.py
+++ b/src/tools/ftbench/src/tohtml.py
@@ -22,18 +22,18 @@ CSS_STYLE = """
}
.col1 {
background-color: #eee;
- }
+ }
+
-
.highlight {
background-color: #0a0;
}
</style>
"""
OBJ_DIR = sys.argv[1]
-BASELINE_DIR = os.path.join(OBJ_DIR,"baseline")
-BENCHMARK_DIR = os.path.join(OBJ_DIR,"benchmark")
-BENCHMARK_HTML = os.path.join(OBJ_DIR,"benchmark.html")
+BASELINE_DIR = os.path.join(OBJ_DIR, "baseline")
+BENCHMARK_DIR = os.path.join(OBJ_DIR, "benchmark")
+BENCHMARK_HTML = os.path.join(OBJ_DIR, "benchmark.html")
FONT_COUNT = 5
@@ -41,11 +41,9 @@ WARNING_SAME_COMMIT = "Warning: Baseline and Benchmark have
the same commit ID!"
INFO_1 = "* Average time for single iteration. Smaller values are better."
INFO_2 = "* If a value in the 'Iterations' column is given as '<i>x | y</i>',
values <i>x</i> and <i>y</i> give the number of iterations in the baseline and
the benchmark test, respectively."
-
-
def main():
- """Entry point for theq script"""
+ """Entry point for theq script"""
with open(BENCHMARK_HTML, "w") as html_file:
write_to_html(html_file, "<html>\n<head>\n")
write_to_html(html_file, CSS_STYLE)
@@ -65,7 +63,7 @@ def main():
# Generate total results table
generate_total_results_table(html_file, BASELINE_DIR, BENCHMARK_DIR)
-
+
# Generate results tables
for filename in os.listdir(BASELINE_DIR):
if filename.endswith(".txt") and not filename == "info.txt":
@@ -75,11 +73,11 @@ def main():
generate_results_table(
html_file, baseline_results, benchmark_results, filename
)
-
-
+
write_to_html(html_file, "<center>Freetype Benchmark</center>\n")
write_to_html(html_file, "</body>\n</html>\n")
+
def write_to_html(html_file, content):
"""Write content to html file"""
html_file.write(content)
@@ -94,7 +92,7 @@ def read_file(file_path):
def parse_info_file(info_file):
"""Get info from info.txt file and return as list"""
info = read_file(info_file)
- info[1] = '<a href="{}{}">{}</a>\n'.format(GITLAB_URL, info[1].strip(),
info[1][:8])
+ info[1] = f'<a href="{GITLAB_URL}{info[1].strip()}">{info[1][:8]}</a>\n'
return info
@@ -111,92 +109,113 @@ def generate_info_table(html_file, baseline_info,
benchmark_info):
):
write_to_html(
html_file,
- '<tr><td class="col1">{}</td><td>{}</td><td>{}</td></tr>\n'.format(
- info, baseline_line.strip(), benchmark_line.strip()
- ),
+ f'<tr><td
class="col1">{info}</td><td>{baseline_line.strip()}</td><td>{benchmark_line.strip()}</td></tr>\n'
)
write_to_html(html_file, "</table><br/>")
write_to_html(html_file, f"<p>{INFO_1}</p>")
write_to_html(html_file, f"<p>{INFO_2}</p>")
-
+
def generate_total_results_table(html_file, baseline_dir, benchmark_dir):
"""Prepare total results table for html"""
-
+
# This dictionary will store aggregated results.
- test_results = {test: {"baseline": 0, "benchmark": 0, "n_baseline": 0,
"n_benchmark": 0} for test in [
- "Load", "Load_Advances (Normal)", "Load_Advances (Fast)",
"Load_Advances (Unscaled)", "Render",
- "Get_Glyph", "Get_Char_Index", "Iterate CMap", "New_Face", "Embolden",
"Stroke", "Get_BBox",
- "Get_CBox", "New_Face & load glyph(s)"
- ]}
-
+ test_results = {
+ test: {"baseline": 0, "benchmark": 0, "n_baseline": 0, "n_benchmark":
0}
+ for test in [
+ "Load",
+ "Load_Advances (Normal)",
+ "Load_Advances (Fast)",
+ "Load_Advances (Unscaled)",
+ "Render",
+ "Get_Glyph",
+ "Get_Char_Index",
+ "Iterate CMap",
+ "New_Face",
+ "Embolden",
+ "Stroke",
+ "Get_BBox",
+ "Get_CBox",
+ "New_Face & load glyph(s)",
+ ]
+ }
+
total_time = 0
-
+
for filename in os.listdir(baseline_dir):
if filename.endswith(".txt") and not filename == "info.txt":
-
baseline_results = read_file(os.path.join(baseline_dir, filename))
benchmark_results = read_file(os.path.join(benchmark_dir,
filename))
-
- for baseline_line, benchmark_line in zip(baseline_results,
benchmark_results):
-
+
+ for baseline_line, benchmark_line in zip(
+ baseline_results, benchmark_results
+ ):
if baseline_line.startswith("Total time:"):
baseline_match = re.match(r"Total time: (\d+)s",
baseline_line)
benchmark_match = re.match(r"Total time: (\d+)s",
benchmark_line)
-
+
if baseline_match and benchmark_match:
total_time += int(baseline_match.group(1))
total_time += int(benchmark_match.group(1))
-
if baseline_line.startswith(" "):
- baseline_match =
re.match(r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", baseline_line)
- benchmark_match =
re.match(r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s", benchmark_line)
-
+ baseline_match = re.match(
+ r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s",
baseline_line
+ )
+ benchmark_match = re.match(
+ r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s",
+ benchmark_line,
+ )
+
if baseline_match and benchmark_match:
test = baseline_match.group(1).strip()
baseline_value = float(baseline_match.group(2))
benchmark_value = float(benchmark_match.group(2))
baseline_n = int(baseline_match.group(3))
benchmark_n = int(benchmark_match.group(3))
-
+
# Aggregate the results
if test in test_results:
test_results[test]["baseline"] += baseline_value
test_results[test]["benchmark"] += benchmark_value
test_results[test]["n_baseline"] += baseline_n
test_results[test]["n_benchmark"] += benchmark_n
-
-
-
+
# Writing to HTML
write_to_html(html_file, "<h2>Total Results</h2>\n")
write_to_html(html_file, '<table border="1">\n')
write_to_html(
html_file,
- '<tr><th>Test</th><th>Iterations</th><th>* Baseline (µs)</th>\
- <th>* Benchmark (µs)</th><th>Difference (%)</th></tr>\n'
+ "<tr><th>Test</th><th>Iterations</th><th>* Baseline (µs)</th>\
+ <th>* Benchmark (µs)</th><th>Difference (%)</th></tr>\n",
)
- total_baseline = total_benchmark = total_diff = total_n_baseline =
total_n_benchmark = 0
-
+ total_baseline = total_benchmark = total_n_baseline = total_n_benchmark = 0
+
for test, values in test_results.items():
-
baseline = values["baseline"] / FONT_COUNT
benchmark = values["benchmark"] / FONT_COUNT
n_baseline = values["n_baseline"] / FONT_COUNT
n_benchmark = values["n_benchmark"] / FONT_COUNT
-
- n_display = f"{n_baseline:.0f} | {n_benchmark:.0f}" if n_baseline !=
n_benchmark else int(n_baseline)
-
- diff = ((baseline - benchmark) / baseline) * 100 if not (baseline -
benchmark) == 0 else 0
+
+ n_display = (
+ f"{n_baseline:.0f} | {n_benchmark:.0f}"
+ if n_baseline != n_benchmark
+ else int(n_baseline)
+ )
+
+ diff = (
+ ((baseline - benchmark) / baseline) * 100
+ if not (baseline - benchmark) == 0
+ else 0
+ )
# Calculate for total row
total_baseline += baseline
total_benchmark += benchmark
total_n_baseline += n_baseline
total_n_benchmark += n_benchmark
-
+
# Check which value is smaller for color highlighting
baseline_color = "highlight" if baseline <= benchmark else ""
benchmark_color = "highlight" if benchmark <= baseline else ""
@@ -205,21 +224,15 @@ def generate_total_results_table(html_file, baseline_dir,
benchmark_dir):
html_file,
f'<tr><td class="col1">{test}</td><td>{n_display}</td>\
<td class="{baseline_color}">{baseline:.1f}</td>\
- <td
class="{benchmark_color}">{benchmark:.1f}</td><td>{diff:.1f}</td></tr>\n'
+ <td
class="{benchmark_color}">{benchmark:.1f}</td><td>{diff:.1f}</td></tr>\n',
)
-
-
- total_diff = ((total_baseline - total_benchmark) / total_baseline) * 100
if not (total_baseline - total_benchmark) == 0 else 0
- total_n_display = f"{total_n_baseline} | {total_n_benchmark}" if
total_n_baseline != total_n_benchmark else str(total_n_baseline)
-
write_to_html(
html_file,
- f'<tr><td class="col1">Total duration for all tests:</td><td
class="col1" colspan="4">{total_time:.0f} s</td>'
+ f'<tr><td class="col1">Total duration for all tests:</td><td
class="col1" colspan="4">{total_time:.0f} s</td>',
)
-
- write_to_html(html_file,'</table>\n')
+ write_to_html(html_file, "</table>\n")
def generate_results_table(html_file, baseline_results, benchmark_results,
filename):
@@ -230,32 +243,27 @@ def generate_results_table(html_file, baseline_results,
benchmark_results, filen
if line.startswith("ftbench results for font")
][0]
- write_to_html(html_file, "<h3>Results for {}</h2>\n".format(fontname))
+ write_to_html(html_file, f"<h3>Results for {fontname}</h2>\n")
write_to_html(html_file, '<table border="1">\n')
write_to_html(
html_file,
- '<tr><th>Test</th><th>Iterations</th>\
- <th>* <a href="{}.txt">Baseline</a> (µs)</th>\
- <th>* <a href="{}.txt">Benchmark</a> (µs)</th>\
- <th>Difference (%)</th></tr>\n'.format(
- os.path.join("./baseline/", filename[:-4]),
- os.path.join("./benchmark/", filename[:-4]),
- ),
+ f'<tr><th>Test</th><th>Iterations</th>\
+ <th>* <a href="{ os.path.join("./baseline/",
filename[:-4])}.txt">Baseline</a> (µs)</th>\
+ <th>* <a href="{ os.path.join("./benchmark/",
filename[:-4])}.txt">Benchmark</a> (µs)</th>\
+ <th>Difference (%)</th></tr>\n'
)
- total_n = total_difference = total_time = 0
+ total_n = total_time = 0
for baseline_line, benchmark_line in zip(baseline_results,
benchmark_results):
-
if baseline_line.startswith("Total time:"):
baseline_match = re.match(r"Total time: (\d+)s", baseline_line)
benchmark_match = re.match(r"Total time: (\d+)s", benchmark_line)
-
+
if baseline_match and benchmark_match:
total_time += int(baseline_match.group(1))
total_time += int(benchmark_match.group(1))
-
if baseline_line.startswith(" "):
baseline_match = re.match(
r"\s+(.*?)\s+(\d+\.\d+)\s+microseconds\s+(\d+)\s",
baseline_line
@@ -269,42 +277,42 @@ def generate_results_table(html_file, baseline_results,
benchmark_results, filen
benchmark_value = float(benchmark_match.group(2))
percentage_diff = (
- (baseline_value - benchmark_value) / baseline_value
- ) * 100 if not (baseline_value - benchmark_value) == 0 else 0
+ ((baseline_value - benchmark_value) / baseline_value) * 100
+ if not (baseline_value - benchmark_value) == 0
+ else 0
+ )
baseline_n = baseline_match.group(3)
benchmark_n = benchmark_match.group(3)
-
n = (
baseline_n
if baseline_n == benchmark_n
else baseline_n + " | " + benchmark_n
)
-
-
total_n += int(baseline_n)
total_n += int(benchmark_n)
-
-
+
# Check which value is smaller for color highlighting
- baseline_color = "highlight" if baseline_value <=
benchmark_value else ""
- benchmark_color = "highlight" if benchmark_value <=
baseline_value else ""
+ baseline_color = (
+ "highlight" if baseline_value <= benchmark_value else ""
+ )
+ benchmark_color = (
+ "highlight" if benchmark_value <= baseline_value else ""
+ )
-
write_to_html(
html_file,
f'<tr><td
class="col1">{baseline_match.group(1)}</td><td>{n}</td>\
- <td class="{baseline_color}">{baseline_value:.1f}</td><td
class="{benchmark_color}">{benchmark_value:.1f}</td><td>{percentage_diff:.1f}</td></tr>\n'
- )
-
+ <td class="{baseline_color}">{baseline_value:.1f}</td><td
class="{benchmark_color}">{benchmark_value:.1f}</td><td>{percentage_diff:.1f}</td></tr>\n',
+ )
+
write_to_html(
html_file,
- f'<tr><td class="col1">Total duration for the font:</td><td
class="col1" colspan="4">{total_time:.0f} s</td></table>\n'
+ f'<tr><td class="col1">Total duration for the font:</td><td
class="col1" colspan="4">{total_time:.0f} s</td></table>\n',
)
-
if __name__ == "__main__":
main()
[Prev in Thread] | Current Thread | [Next in Thread]
- [freetype2] GSoC-2023-Ahmet 210471bcc: clean whitespace,
Werner Lemberg <=