Last active
September 1, 2016 11:24
-
-
Save patilarpith/570ccda42727d6ee120e412e3fef94be to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Start of HEAD
require 'json'

# Input container handed to the custom checker: file paths, the default
# diff result, and resource metrics for a single test-case run.
TestStruct = Struct.new(
  :testcase_id,
  :testcase_input_path,
  :testcase_output_path,
  :testcase_expected_output_path,
  :metadata_file_paths,
  :submission_code_path,
  :testcase_result,
  :testcase_signal,
  :testcase_time,
  :testcase_memory,
  :data
)

# Output container filled in by the custom checker: pass/fail flag,
# normalized score, and a message shown to the problem solver.
ResultStruct = Struct.new(
  :result,
  :score,
  :message
)
# End of HEAD
# Start of BODY | |
=begin | |
TestStruct:: | |
testcase_id [long] ID of the test-case | |
testcase_input_path [String] File path to test-case input | |
testcase_output_path [String] File path to test-case output generated by the problem solver | |
testcase_expected_output_path [String] File path to test-case expected output to be matched with | |
metadata_file_paths [ArrayList<String>] File paths to Question metadata (Extra files usually used for defining training sets)
submission_code_path [String] File path to submission source code | |
testcase_result [boolean] Set to true if test-case output matches test-case expected output. Matching is done line by line | |
testcase_signal [long] Exit code of the test-case process | |
testcase_time [double] Time taken by the test-case process in seconds | |
testcase_memory [long] Peak memory of the test-case process determined in bytes | |
data [String] <Future use> | |
ResultStruct:: | |
result [boolean] Assign test-case result. true determines success. false determines failure | |
score [double] Assign test-case score. Normalized between 0 to 1
message [String] Assign test-case message. This message is visible to the problem solver
=end | |
# run_custom_checker(TestStruct, ResultStruct)
#
# Evaluates one test case. Reads the inputs from t_obj and writes the
# verdict into r_obj (result, score, message).
def run_custom_checker(t_obj, r_obj)
  # Don't print anything to STDOUT in this function: the platform reads the
  # result JSON from STDOUT (see write_result_json), so stray output would
  # corrupt it. Debug output therefore goes to STDERR via Kernel#warn.
  warn "testcase_id: #{t_obj.testcase_id}"
  warn "testcase_input_path: #{t_obj.testcase_input_path}"
  warn "testcase_output_path: #{t_obj.testcase_output_path}"
  warn "testcase_expected_output_path: #{t_obj.testcase_expected_output_path}"
  t_obj.metadata_file_paths.each do |path|
    warn "metadata_file_paths: #{path}"
  end
  warn "submission_code_path: #{t_obj.submission_code_path}"
  warn "testcase_result: #{t_obj.testcase_result}"
  warn "testcase_signal: #{t_obj.testcase_signal}"
  warn "testcase_time: #{t_obj.testcase_time}"
  warn "testcase_memory: #{t_obj.testcase_memory}"
  warn "data: #{t_obj.data}"

  # Enter your custom checker scoring logic here.
  r_obj.result = true
  r_obj.score = 1.0
  r_obj.message = "Success"
end
# End of BODY | |
# Start of TAIL | |
# Parses the runner-supplied JSON file at json_file_path and populates t_obj.
#
# json_file_path [String]     path to the input JSON handed over by the runner
# t_obj          [TestStruct] pre-initialized struct to fill in place
#
# Returns true on failure (missing, unreadable, or malformed file),
# false on success — the caller treats a truthy return as a fatal error.
def read_input_json(json_file_path, t_obj)
  begin
    # File.read and JSON.parse must both be inside the begin block so that a
    # missing/unreadable file or malformed JSON is reported as a failure
    # (return true) instead of crashing the checker with an uncaught error.
    file_contents = File.read(json_file_path)
    json_obj = JSON.parse(file_contents)

    # Read values
    t_obj.testcase_id = json_obj["testcase_id"]
    t_obj.testcase_input_path = json_obj["input_file_path"]
    t_obj.testcase_output_path = json_obj["output_file_path"]
    t_obj.testcase_expected_output_path = json_obj["expected_output_file_path"]

    # metadata_file_paths is optional; append each entry when present.
    metadata_file_path_node = json_obj["metadata_file_paths"]
    if metadata_file_path_node
      metadata_file_path_node.each do |metadata_file_path|
        t_obj.metadata_file_paths << metadata_file_path
      end
    end

    t_obj.submission_code_path = json_obj["submission_code_path"]
    t_obj.testcase_result = json_obj["testcase_result"]
    t_obj.testcase_signal = json_obj["testcase_signal"]
    t_obj.testcase_time = json_obj["testcase_time"]
    t_obj.testcase_memory = json_obj["testcase_memory"]
    t_obj.data = json_obj["data"]
  rescue
    return true
  end
  false
end
# Serializes r_obj into the result JSON expected by the platform and prints
# it to STDOUT — the only thing that may be written to STDOUT.
#
# r_obj [ResultStruct] result, score and message to report
def write_result_json(r_obj)
  json_obj = {
    # Boolean verdict is encoded as 1/0.
    custom_result: r_obj.result ? 1 : 0,
    # Score is clamped to the normalized [0, 1] range.
    custom_score: r_obj.score.clamp(0, 1),
    # Message is truncated to 4096 characters.
    custom_message: r_obj.message[0..4095]
  }
  puts json_obj.to_json
end
# Input parameters: TestStruct pre-initialized with neutral defaults so that
# every field is readable even if the input JSON omits it.
t_obj = TestStruct.new(
  0,
  "",
  "",
  "",
  [],
  "",
  false,
  0,
  0.0,
  0,
  ""
)
# Out parameters: failure defaults, overwritten by the checker on success.
r_obj = ResultStruct.new(
  false,
  0.0,
  "Uninitialized"
)
# The input JSON path must be supplied as the first command line argument;
# without it, emit the default (failure) result and exit non-zero.
if ARGV.length < 1
  write_result_json(r_obj)
  abort
end
# Decode input JSON
failure = read_input_json(ARGV[0], t_obj)
# In case the input JSON was malformed or nonexistent
if failure
  r_obj.message = "Unable to read input json"
  write_result_json(r_obj)
  abort
end
# Run the custom checker evaluator
run_custom_checker(t_obj, r_obj)
# Encode result JSON
write_result_json(r_obj)
# End of TAIL
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Testing:
Input file data. Path to be passed as command line argument
Expected output: