// Numbas version: finer_feedback_settings {"name": "Distributions", "extensions": ["programming"], "custom_part_types": [{"source": {"pk": 195, "author": {"name": "Christian Lawson-Perfect", "pk": 7}, "edit_page": "/part_type/195/edit"}, "name": "Code", "short_name": "mark-code-3", "description": "
Mark code provided by the student by running it and a series of validation and marking tests.
\nThe validation tests are used to reject an answer if the student has misunderstood the task, for example if they haven't defined a required variable or function.
\nMarking tests check properties of the student's code. Each test awards a proportion of the available credit if it is passed.
\nYou can optionally show the student the STDOUT and/or STDERR when running their code.
\nYou can give a preamble and postamble which are run before and after the student's code, and also modify the student's code before running it.
", "help_url": "", "input_widget": "code-editor", "input_options": {"correctAnswer": "if(settings[\"correct_answer_subvars\"],\n render(settings[\"correct_answer\"])\n,\n settings[\"correct_answer\"]\n)", "hint": {"static": false, "value": "\"Write \"+capitalise(language_synonym(settings[\"code_language\"]))+\" code\""}, "language": {"static": false, "value": "language_synonym(settings[\"code_language\"])"}, "placeholder": {"static": false, "value": "if(settings[\"correct_answer_subvars\"],\n render(settings[\"placeholder\"])\n,\n settings[\"placeholder\"]\n)"}, "theme": {"static": true, "value": "textmate"}}, "can_be_gap": true, "can_be_step": true, "marking_script": "mark:\napply(main_error);\napply(show_images);\napply(matplotlib_feedback);\napply(postamble_feedback);\napply(validation_test_feedback);\napply(marking_test_feedback)\n\ninterpreted_answer:\nstudentAnswer\n\nmain_result:\ncode_result[3]\n\nmarking_results:\ncode_result[6..(len(settings[\"tests\"])+6)]\n\nvalidation_results:\ncode_result[(len(settings[\"tests\"])+6)..len(code_result)]\n\nmain_error:\nassert(main_stdout=\"\" or not settings[\"show_stdout\"],\n feedback(\"Your code produced this output:{escape_html(main_stdout)}\")\n);\nassert(main_result[\"success\"],\n warn(\"\"\"There was an error in your code.\"\"\");\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in your code:
{escape_html(main_result[\"stderr\"])}\"\"\")\n ,\n fail(\"There was an error in your code.\")\n )\n)\n\nmarking_test_feedback:\nmap(\n let(\n [name,weight,code], test,\n header, \"Test: {name} \",\n if(r[\"success\"],\n let(\n result, r[\"result\"],\n max_credit, weight/total_weight,\n credit, if(result isa \"number\", result, award(1,result)),\n switch(\n credit=0, negative_feedback(header+\"was not passed.\"),\n credit=1, add_credit(max_credit, header+\"was passed.\"),\n add_credit(credit*max_credit, header+\"was partially passed.\")\n )\n )\n ,\n if(settings[\"show_marking_errors\"],\n negative_feedback(\"\"\"There was an error:
{escape_html(r[\"stderr\"])}\"\"\")\n ,\n negative_feedback(header+\"was not passed.\")\n )\n )\n ),\n [test,r],\n zip(settings[\"tests\"],marking_results)\n)\n\nvalidation_test_feedback:\nmap(\n let([name,code], test,\n if(r[\"success\"],\n if(r[\"result\"],\n true\n ,\n warn(\"\"\"Your code failed the test {name}.\"\"\");\n fail(\"\"\"Your code failed the test {name}.\"\"\");false\n )\n ,\n warn(\"\"\"There was an error running the test {name}.\"\"\");\n fail(\"\"\"There was an error running the test {name}:
{escape_html(r[\"stderr\"])}\"\"\")\n )\n ),\n [test,r],\n zip(settings[\"validation_tests\"],validation_results)\n)\n\ntotal_weight:\nsum(map(weight,[name,weight,code],settings[\"tests\"]))\n\npre_submit:\n[run_code(code_language,\n [\n matplotlib_preamble,\n variables_as_code(language_synonym(code_language), settings[\"variables\"]),\n render(settings[\"preamble\"]),\n if(trim(settings[\"modifier\"])=\"\", studentAnswer, eval(expression(settings[\"modifier\"]))),\n render(settings[\"postamble\"]),\n matplotlib_postamble\n ]\n +map(code,[name,marks,code],settings[\"tests\"])\n +map(code,[name,code],settings[\"validation_tests\"])\n)]\n\ncode_result:\npre_submit[\"code_result\"]\n\nmain_stdout:\nsafe(main_result[\"stdout\"])\n\ncode_language:\nsettings[\"code_language\"]\n\npreamble_result:\ncode_result[2]\n\npreamble_stderr:\npreamble_result[\"stderr\"]\n\npostamble_result:\ncode_result[4]\n\npostamble_stderr:\npostamble_result[\"stderr\"]\n\npostamble_feedback:\nassert(postamble_result[\"stdout\"]=\"\",\n feedback(\n if(settings[\"postamble_feedback_whitespace\"],\n html(\"\"\"
{escape_html(postamble_result[\"stdout\"])}\"\"\")\n ,\n postamble_result[\"stdout\"]\n )\n )\n);\nassert(postamble_result[\"success\"],\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in the marking routine postamble:
{escape_html(postamble_result[\"stderr\"])}\"\"\")\n ,\n fail(\"There was an error in the marking routine postamble.\")\n )\n)\n\nmatplotlib_preamble:\nif(code_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n plt.clf() \n\"\"\"),\n \"\"\n)\n\nmatplotlib_postamble:\nswitch(\ncode_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n fig = plt.gcf()\n if fig.get_axes():\n fig.savefig(sys.stdout, format='svg')\n\"\"\"),\n \"\"\n)\n\nmatplotlib_result:\ncode_result[5]\n\nmatplotlib_feedback:\nswitch(\ncode_language=\"pyodide\",\n assert(matplotlib_result[\"stdout\"]=\"\",\n feedback(matplotlib_result[\"stdout\"])\n ),\n \"\"\n)\n\n\n\nimages:\nflatten(map(\n get(r,\"images\",[]),\n r,\n code_result\n))\n\nshow_images:\nassert(len(images)=0 or not settings[\"show_stdout\"],\n feedback(\"Your code produced the following {pluralise(len(images),'image','images')}:\");\n map(\n feedback(html(x)),\n x,\n images\n )\n)", "marking_notes": [{"name": "mark", "description": "This is the main marking note. It should award credit and provide feedback based on the student's answer.", "definition": "apply(main_error);\napply(show_images);\napply(matplotlib_feedback);\napply(postamble_feedback);\napply(validation_test_feedback);\napply(marking_test_feedback)"}, {"name": "interpreted_answer", "description": "A value representing the student's answer to this part.", "definition": "studentAnswer"}, {"name": "main_result", "description": "
The result of running the student's code (after the preamble has run), without any tests.
\nNormally used to detect errors in the student's code.
", "definition": "code_result[3]"}, {"name": "marking_results", "description": "The results of running the marking tests.
", "definition": "code_result[6..(len(settings[\"tests\"])+6)]"}, {"name": "validation_results", "description": "The results of running the validation tests.
", "definition": "code_result[(len(settings[\"tests\"])+6)..len(code_result)]"}, {"name": "main_error", "description": "Show STDOUT if allowed.
\nCheck the student's code runs on its own. Fail if there was an error, and show STDERR if allowed.
", "definition": "assert(main_stdout=\"\" or not settings[\"show_stdout\"],\n feedback(\"Your code produced this output:{escape_html(main_stdout)}\")\n);\nassert(main_result[\"success\"],\n warn(\"\"\"There was an error in your code.\"\"\");\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in your code:
{escape_html(main_result[\"stderr\"])}\"\"\")\n ,\n fail(\"There was an error in your code.\")\n )\n)"}, {"name": "marking_test_feedback", "description": "
Feedback on the marking tests. For each test, if the test was passed then add the corresponding amount of credit. If there was an error, show the error.
", "definition": "map(\n let(\n [name,weight,code], test,\n header, \"Test: {name} \",\n if(r[\"success\"],\n let(\n result, r[\"result\"],\n max_credit, weight/total_weight,\n credit, if(result isa \"number\", result, award(1,result)),\n switch(\n credit=0, negative_feedback(header+\"was not passed.\"),\n credit=1, add_credit(max_credit, header+\"was passed.\"),\n add_credit(credit*max_credit, header+\"was partially passed.\")\n )\n )\n ,\n if(settings[\"show_marking_errors\"],\n negative_feedback(\"\"\"There was an error:{escape_html(r[\"stderr\"])}\"\"\")\n ,\n negative_feedback(header+\"was not passed.\")\n )\n )\n ),\n [test,r],\n zip(settings[\"tests\"],marking_results)\n)"}, {"name": "validation_test_feedback", "description": "
Give feedback on the validation tests. If any of them are not passed, the student's answer is invalid.
", "definition": "map(\n let([name,code], test,\n if(r[\"success\"],\n if(r[\"result\"],\n true\n ,\n warn(\"\"\"Your code failed the test {name}.\"\"\");\n fail(\"\"\"Your code failed the test {name}.\"\"\");false\n )\n ,\n warn(\"\"\"There was an error running the test {name}.\"\"\");\n fail(\"\"\"There was an error running the test {name}:{escape_html(r[\"stderr\"])}\"\"\")\n )\n ),\n [test,r],\n zip(settings[\"validation_tests\"],validation_results)\n)"}, {"name": "total_weight", "description": "
The sum of the weights of the marking tests. Each test's weight is divided by this to produce a proportion of the available credit.
", "definition": "sum(map(weight,[name,weight,code],settings[\"tests\"]))"}, {"name": "pre_submit", "description": "The code blocks to run.
\nIn order, they are:
\nThe results of the code blocks: a list with an entry corresponding to each block of code.
", "definition": "pre_submit[\"code_result\"]"}, {"name": "main_stdout", "description": "The stdout from the student's code.
", "definition": "safe(main_result[\"stdout\"])"}, {"name": "code_language", "description": "The language the code is written in. Either \"pyodide\" (Python) or \"webr\" (R)
", "definition": "settings[\"code_language\"]"}, {"name": "preamble_result", "description": "The result of running the preamble block.
", "definition": "code_result[2]"}, {"name": "preamble_stderr", "description": "The STDERR produced by the preamble block.
", "definition": "preamble_result[\"stderr\"]"}, {"name": "postamble_result", "description": "The result of running the postamble.
", "definition": "code_result[4]"}, {"name": "postamble_stderr", "description": "The STDERR produced by the postamble block.
", "definition": "postamble_result[\"stderr\"]"}, {"name": "postamble_feedback", "description": "Show the STDOUT from the postamble, if there is any.
", "definition": "assert(postamble_result[\"stdout\"]=\"\",\n feedback(\n if(settings[\"postamble_feedback_whitespace\"],\n html(\"\"\"{escape_html(postamble_result[\"stdout\"])}\"\"\")\n ,\n postamble_result[\"stdout\"]\n )\n )\n);\nassert(postamble_result[\"success\"],\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in the marking routine postamble:
{escape_html(postamble_result[\"stderr\"])}\"\"\")\n ,\n fail(\"There was an error in the marking routine postamble.\")\n )\n)"}, {"name": "matplotlib_preamble", "description": "
Preamble for a hack to ensure that figures produced by matplotlib in Python are displayed.
\nThis code clears the matplotlib output, if matplotlib has been loaded.
", "definition": "if(code_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n plt.clf() \n\"\"\"),\n \"\"\n)"}, {"name": "matplotlib_postamble", "description": "A hack to show any figures produced with matplotlib in the stdout.
", "definition": "switch(\ncode_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n fig = plt.gcf()\n if fig.get_axes():\n fig.savefig(sys.stdout, format='svg')\n\"\"\"),\n \"\"\n)"}, {"name": "matplotlib_result", "description": "The result of running the matplotlib hack.
", "definition": "code_result[5]"}, {"name": "matplotlib_feedback", "description": "Feedback from the matplotlib hack: if a figure is produced, it's displayed as SVG here.
", "definition": "switch(\ncode_language=\"pyodide\",\n assert(matplotlib_result[\"stdout\"]=\"\",\n feedback(matplotlib_result[\"stdout\"])\n ),\n \"\"\n)\n\n"}, {"name": "images", "description": "Any images produced by the code blocks.
", "definition": "flatten(map(\n get(r,\"images\",[]),\n r,\n code_result\n))"}, {"name": "show_images", "description": "Show the images produced by the code.
", "definition": "assert(len(images)=0 or not settings[\"show_stdout\"],\n feedback(\"Your code produced the following {pluralise(len(images),'image','images')}:\");\n map(\n feedback(html(x)),\n x,\n images\n )\n)"}], "settings": [{"name": "show_input_hint", "label": "Show the input hint?", "help_url": "", "hint": "", "input_type": "checkbox", "default_value": true}, {"name": "code_language", "label": "Code language", "help_url": "", "hint": "The language that the student's code will be written in.", "input_type": "dropdown", "default_value": "pyodide", "choices": [{"value": "pyodide", "label": "Python"}, {"value": "webr", "label": "R"}]}, {"name": "correct_answer", "label": "Correct answer", "help_url": "", "hint": "A correct answer to the part.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "correct_answer_subvars", "label": "Substitute question variables into the correct answer?", "help_url": "", "hint": "If ticked, then JME expressions between curly braces will be evaluated and substituted into the correct answer.studentAnswer
.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "preamble", "label": "Preamble", "help_url": "", "hint": "This code is run before the student's code. Define anything that the student's code or your tests need.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "postamble", "label": "Postamble", "help_url": "", "hint": "This code is run after the student's code but before the validation and unit tests.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "postamble_feedback_whitespace", "label": "Format postamble output as code?", "help_url": "", "hint": "If ticked, any output produced by the postamble will be formatted in monospace font, with whitespace preserved. If not ticked, it'll be presented as prose text or HTML.", "input_type": "checkbox", "default_value": false}, {"name": "tests", "label": "Marking tests", "help_url": "", "hint": "A list of tests used to mark the student's answer.A list of tests used to validate that the student's code is acceptable.
Each item is a list with two string values:
The student must write R code to generate random samples from, and calculate probabilities for, some common distributions.
", "licence": "Creative Commons Attribution 4.0 International"}, "statement": "", "advice": "", "rulesets": {}, "extensions": ["programming"], "builtin_constants": {"e": true, "pi,\u03c0": true, "i": true}, "constants": [], "variables": {"mu": {"name": "mu", "group": "Random parameters", "definition": "random(-3 .. 3#1)", "description": "$\\mu$ used in part a.
", "templateType": "randrange", "can_override": false}, "sigma": {"name": "sigma", "group": "Random parameters", "definition": "random(0.1 .. 1#0.1)", "description": "$\\sigma$ used in part a
", "templateType": "randrange", "can_override": false}, "n": {"name": "n", "group": "Random parameters", "definition": "random(10 .. 15#1)", "description": "Random sample size for part a and part b
", "templateType": "randrange", "can_override": false}, "x": {"name": "x", "group": "Random parameters", "definition": "random(1..n)", "description": "Random x used in Binomial question part b
", "templateType": "anything", "can_override": false}, "p": {"name": "p", "group": "Random parameters", "definition": "random(0.1 .. 0.9#0.1)", "description": "Random $p$ used for Binomial in part b
", "templateType": "randrange", "can_override": false}, "y": {"name": "y", "group": "Random parameters", "definition": "random(5 .. 10#1)", "description": "Random y for part c
", "templateType": "randrange", "can_override": false}, "lambda": {"name": "lambda", "group": "Random parameters", "definition": "random(5 .. 10#1)", "description": "$\\lambda$ for Poisson in part c
", "templateType": "randrange", "can_override": false}, "alpha": {"name": "alpha", "group": "Random parameters", "definition": "random(1 .. 5#0.1)", "description": "Rate parameter for exponential distribution in part d
", "templateType": "randrange", "can_override": false}}, "variablesTest": {"condition": "", "maxRuns": 100}, "ungrouped_variables": [], "variable_groups": [{"name": "Random parameters", "variables": ["mu", "sigma", "n", "x", "p", "lambda", "y", "alpha"]}], "functions": {}, "preamble": {"js": "", "css": ""}, "parts": [{"type": "mark-code-3", "useCustomName": false, "customName": "", "marks": 1, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Give the command to generate $\\var{n}$ random samples from a Normal distribution with $\\mu=\\var{mu}$ and $\\sigma=\\var{sigma}$.
", "settings": {"show_input_hint": true, "code_language": "webr", "correct_answer": "rnorm({n},{mu},{sigma})", "correct_answer_subvars": true, "show_stdout": false, "show_stderr": false, "show_marking_errors": false, "placeholder": "", "modifier": "", "preamble": "set.seed(2906)", "postamble": "student_answer=.Last.value\nset.seed(2906)", "tests": "[\n [\"Correct calculation\", 1, \"identical(student_answer,rnorm({n},{mu},{sigma}))\"]\n]", "validation_tests": "[\n]", "variables": "dict()"}}, {"type": "mark-code-3", "useCustomName": false, "customName": "", "marks": 1, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Give the command to calculate $Pr(X=\\var{x})$ for $X\\sim \\operatorname{Bin}(\\var{n},\\var{p})$
", "alternatives": [{"type": "mark-code-3", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "alternativeFeedbackMessage": "pbinom
calculates the cumulative distribution, try dbinom
Give the command to calculate $Pr(Y\\le\\var{y})$ for $Y\\sim \\operatorname{Po}(\\var{lambda})$
", "alternatives": [{"type": "mark-code-3", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "alternativeFeedbackMessage": "dpois
gives the probability mass function, try ppois
for the cumulative distribution function
Give the command to generate a single random sample from an exponential distribution with rate parameter $\\var{alpha}$
", "settings": {"show_input_hint": true, "code_language": "webr", "correct_answer": "rexp(1,{alpha})", "correct_answer_subvars": true, "show_stdout": false, "show_stderr": false, "show_marking_errors": false, "placeholder": "", "modifier": "", "preamble": "set.seed(2906)", "postamble": "student_answer=.Last.value\nset.seed(2906)", "tests": "[\n [\"Correct calculation\", 1, \"identical(student_answer,rexp(1,{alpha}))\"]\n]", "validation_tests": "[\n]", "variables": "dict()"}}], "partsMode": "all", "maxMarks": 0, "objectives": [], "penalties": [], "objectiveVisibility": "always", "penaltyVisibility": "always", "type": "question", "contributors": [{"name": "Christian Lawson-Perfect", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/7/"}, {"name": "Aamir Khan", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/4537/"}]}]}], "contributors": [{"name": "Christian Lawson-Perfect", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/7/"}, {"name": "Aamir Khan", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/4537/"}]}