Skip to content
GitLab
Menu
Projects
Groups
Snippets
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in
Toggle navigation
Menu
Open sidebar
Matthew P. Russell
unit_test
Commits
22c836de
Commit
22c836de
authored
Dec 16, 2020
by
Matthew P. Russell
Browse files
ready for production testing
parent
3a1fb3cf
Changes
1
Hide whitespace changes
Inline
Side-by-side
bin/#unit_test#
deleted
100755 → 0
View file @
3a1fb3cf
#!/g/15/2020uc/support/bin/python3.7
"""
unit_test
matt russell
comp15 2020 summer
updated 12/11/2020
unit_test - a frictionless C++ unit testing framework.
details relating to this script can be found in the README
"""
import
subprocess
import
sys
import
re
import
os
import
shutil
from
math
import
ceil
#-----------------------------------------------------------------------------#
#                                initial setup                                #
#-----------------------------------------------------------------------------#
ORIG_DRIVERFILE = '/comp/15/lib/unit_test_driver.cpp'
DRIVERFILE = './unit_test_driver.cpp'  # Must match Makefile

# ANSI color codes for pretty printing to the terminal
FAILURE = "31"
SUCCESS = "32"
INFO = "34"


def inform(s, color=INFO, linebreak=False, extraline=False):
    """Write s to stderr wrapped in the given ANSI color code.

    If linebreak is True, a dashed separator line follows the message;
    if extraline is True, a blank line is appended after everything.
    """
    sys.stderr.write(f"\033[1;{color}m{s}\033[0m\n")
    if linebreak:
        sys.stderr.write("-----------------------------------\n")
    if extraline:
        sys.stderr.write("\n")
def cleanup():
    """Delete the driver file that was copied into the cwd."""
    os.unlink(DRIVERFILE)
# announce the setup phase (leading newline separates it from the prompt)
inform("\nsetting up tests", color=INFO, linebreak=True)

# copy the (incomplete) driver to the cwd; it will be filled in below with
# forward declarations and a name->function map for every discovered test
try:
    shutil.copyfile(ORIG_DRIVERFILE, DRIVERFILE)
except shutil.SameFileError:
    # if the driver is already here
    pass
#-----------------------------------------------------------------------------#
#    extract test names and function definitions from the testing file(s)     #
#-----------------------------------------------------------------------------#
# every *_tests.cpp file in the cwd is treated as a test source file
test_files = [f for f in os.listdir('./') if "_tests.cpp" in f]

test_names = []  # bare test function names, e.g. "test_foo"
decl_lines = []  # forward declarations to splice into the driver

for test_file_name in test_files:
    # BUGFIX: use a context manager so the file handle is closed
    # (original used open(...).read() and leaked the handle)
    with open(test_file_name, 'r') as test_fp:
        curr_test_file = test_fp.read()

    # extract the void function declarations that take no args
    curr_test_list = re.findall(r'\n\s*void .*\(\)', curr_test_file)

    # extract the names of the tests for this test file
    # ("\nvoid test_foo()" -> "test_foo")
    test_names.extend([x.split()[1].split('(')[0] for x in curr_test_list])

    # extract the function definitions of the tests for this test file
    # these will be used to forward declare the functions in the driver
    decl_lines.extend([x.lstrip() + ';\n' for x in curr_test_list])
#-----------------------------------------------------------------------------#
#              augment the driver file with the test information              #
#-----------------------------------------------------------------------------#
# load the driver as a list of lines so we can splice text into it
with open(DRIVERFILE, 'r') as fp:
    driver_contents = fp.readlines()

# locate the two insertion points: forward declarations go right after the
# FnPtr typedef, and the {name, function} pairs go right after the map opens
for idx, src_line in enumerate(driver_contents):
    if "typedef void (*FnPtr)();" in src_line:
        decl_insert_at = idx + 1
    if "std::map<std::string, FnPtr> tests" in src_line:
        pair_insert_at = idx + 1

# splice in the pairs first — they sit later in the file, so inserting them
# does not shift the declaration insertion point
for name in test_names:
    driver_contents.insert(pair_insert_at,
                           "\t{ \"" + name + "\", " + name + " },\n")
    pair_insert_at += 1

# now splice in the forward declarations
for decl in decl_lines:
    driver_contents.insert(decl_insert_at, decl)
    decl_insert_at += 1

# flatten and write the completed driver back out
driver_contents = "".join(driver_contents)
with open(DRIVERFILE, 'w') as fp:
    fp.write(driver_contents)

inform("tests were set up successfully", color=SUCCESS, extraline=True)
#-----------------------------------------------------------------------------#
#              initialize variables for keeping track of the grade            #
#-----------------------------------------------------------------------------#
# total points
overall_score = 100.0
# total points valgrind tests are worth
valgrind_score = 15.0
# number of points the standard tests are worth
standard_score = overall_score - valgrind_score
# score keeps track of the student's overall score
score = 0.0
# test_results stores the results of the regular tests
# NOTE(review): these two lists are never appended to later in the script
standard_test_results = []
# valgrind_test_results stores the results of the valgrind tests
valgrind_test_results = []
# how many tests there are
num_tests = len(test_names)
# BUGFIX: guard against ZeroDivisionError below when no tests were found
# (e.g. no *_tests.cpp file, or no void f() declarations in it)
if num_tests == 0:
    inform("no tests were found", color=FAILURE, extraline=True)
    cleanup()
    sys.exit(1)
# points per standard test
standard_test_max = round(standard_score / num_tests, 3)
# points per valgrind test
valgrind_test_max = round(valgrind_score / num_tests, 3)
#-----------------------------------------------------------------------------#
#                              compile the tests                              #
#-----------------------------------------------------------------------------#
inform("compiling tests", color=INFO, linebreak=True)

build_result = subprocess.run(['make'],
                              capture_output=True,
                              universal_newlines=True)

# guard clause: on a failed build, show the compiler output, tidy up, and quit
if build_result.returncode != 0:
    inform("compilation failed", color=FAILURE, linebreak=True)
    inform(build_result.stderr, color=FAILURE, extraline=True)
    subprocess.run(['make', 'clean'])
    cleanup()
    sys.exit(1)

inform("compilation passed", color=SUCCESS, extraline=True)
#-----------------------------------------------------------------------------#
#                                run the tests                                #
#-----------------------------------------------------------------------------#
for test in test_names:
    # "test_foo_bar" -> "test foo bar" for the progress header
    human_readable_name = " ".join(test.split('_'))
    inform(human_readable_name, color=INFO, linebreak=True)

    # run the test and capture the output of cerr
    test_result = subprocess.run(["./a.out", test],
                                 capture_output=True,
                                 universal_newlines=True)

    # test was successful
    if test_result.returncode == 0:
        score += standard_test_max
        inform("test passed", color=SUCCESS)

        # Run valgrind
        valgrind_command = ['valgrind', '--leak-check=full',
                            '--show-leak-kinds=all', './a.out', test]
        valgrind_result = subprocess.run(valgrind_command,
                                         capture_output=True,
                                         universal_newlines=True)
        # keep the full stderr text around: it is both what we scan for
        # leaks and what we show the student on failure
        valgrind_output = valgrind_result.stderr

        # memory errors force nonzero return code by default
        valgrind_passed = valgrind_result.returncode == 0

        # Assume True, and set to False if leaks are present
        valgrind_passphrase = "All heap blocks were freed -- no leaks are possible"
        if valgrind_passphrase not in valgrind_output:
            # only inspect the part after "LEAK SUMMARY"
            # BUGFIX: guard the split — the original indexed [1]
            # unconditionally and raised IndexError when no summary existed
            if "LEAK SUMMARY" in valgrind_output:
                leak_summary = valgrind_output.split("LEAK SUMMARY")[1]
                # check for leaks: any "lost" category with nonzero bytes
                for line in leak_summary.split('\n'):
                    if "lost" in line and "0 bytes in 0 blocks" not in line:
                        valgrind_passed = False
                        break
            else:
                # no passphrase and no summary: conservatively treat as a fail
                valgrind_passed = False

        if valgrind_passed:
            inform("valgrind passed", color=SUCCESS, extraline=True)
            score += valgrind_test_max
        else:
            inform("valgrind failed", color=FAILURE, linebreak=True)
            # BUGFIX: the original rebound VALGRIND_RESULT to a str and then
            # accessed .stderr on it, raising AttributeError on this path
            inform(valgrind_output, color=FAILURE, extraline=True)

    # if here, then the regular test failed
    else:
        inform("test failed", color=FAILURE)
        # returncode of -11 means the child was killed by SIGSEGV
        if abs(test_result.returncode) == 11:
            inform("segmentation fault!", color=FAILURE)
        inform("valgrind failed by default", color=FAILURE, extraline=True)
        inform("output", color=FAILURE, linebreak=True)
        inform(test_result.stderr, color=FAILURE, extraline=True)
# cap the final score at the maximum (rounding per-test point values can
# otherwise push the sum slightly over)
final_score = min(overall_score, round(score, 2))

# don't sandbag anyone b/c rounding
if final_score > 99:
    final_score = 100

inform(f"final score: {final_score} / {overall_score}",
       color=INFO, extraline=True)

# clean up build artifacts and the copied driver
subprocess.run(['make', 'clean'])
cleanup()
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment