// Numbas version: finer_feedback_settings {"name": "Problem Solving Exercises 1", "metadata": {"description": "", "licence": "Creative Commons Attribution 4.0 International"}, "duration": 0, "percentPass": 0, "showQuestionGroupNames": false, "shuffleQuestionGroups": false, "showstudentname": true, "question_groups": [{"name": "Group", "pickingStrategy": "all-ordered", "pickQuestions": 1, "questionNames": ["", ""], "variable_overrides": [[], []], "questions": [{"name": "Application of maximum-likelihood estimation", "extensions": ["programming"], "custom_part_types": [{"source": {"pk": 195, "author": {"name": "Christian Lawson-Perfect", "pk": 7}, "edit_page": "/part_type/195/edit"}, "name": "Code", "short_name": "mark-code-3", "description": "
Mark code provided by the student by running it and a series of validation and marking tests.
\nThe validation tests are used to reject an answer if the student has misunderstood the task, for example if they haven't defined a required variable or function.
\nMarking tests check properties of the student's code. Each test awards a proportion of the available credit if it is passed.
\nYou can optionally show the student the STDOUT and/or STDERR when running their code.
\nYou can give a preamble and postamble which are run before and after the student's code, and also modify the student's code before running it.
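\nFor example (a sketch, assuming the R (webr) language is selected, that the student is asked to define a function called log_likelihood, and that the preamble defines a reference implementation correct_log_likelihood): a validation test could be the single expression exists(\"log_likelihood\") && is.function(log_likelihood), and a marking test could be isTRUE(all.equal(log_likelihood(c(2, 0.5)), correct_log_likelihood(c(2, 0.5)))), which returns TRUE to award that test's share of the credit.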
", "help_url": "", "input_widget": "code-editor", "input_options": {"correctAnswer": "if(settings[\"correct_answer_subvars\"],\n render(settings[\"correct_answer\"])\n,\n settings[\"correct_answer\"]\n)", "hint": {"static": false, "value": "\"Write \"+capitalise(language_synonym(settings[\"code_language\"]))+\" code\""}, "language": {"static": false, "value": "language_synonym(settings[\"code_language\"])"}, "placeholder": {"static": false, "value": "if(settings[\"correct_answer_subvars\"],\n render(settings[\"placeholder\"])\n,\n settings[\"placeholder\"]\n)"}, "theme": {"static": true, "value": "textmate"}}, "can_be_gap": true, "can_be_step": true, "marking_script": "mark:\napply(main_error);\napply(show_images);\napply(matplotlib_feedback);\napply(postamble_feedback);\napply(validation_test_feedback);\napply(marking_test_feedback)\n\ninterpreted_answer:\nstudentAnswer\n\nmain_result:\ncode_result[3]\n\nmarking_results:\ncode_result[6..(len(settings[\"tests\"])+6)]\n\nvalidation_results:\ncode_result[(len(settings[\"tests\"])+6)..len(code_result)]\n\nmain_error:\nassert(main_stdout=\"\" or not settings[\"show_stdout\"],\n feedback(\"Your code produced this output:{escape_html(main_stdout)}\")\n);\nassert(main_result[\"success\"],\n warn(\"\"\"There was an error in your code.\"\"\");\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in your code: {escape_html(main_result[\"stderr\"])}\"\"\")\n ,\n fail(\"There was an error in your code.\")\n )\n)\n\nmarking_test_feedback:\nmap(\n let(\n [name,weight,code], test,\n header, \"Test: {name} \",\n if(r[\"success\"],\n let(\n result, r[\"result\"],\n max_credit, weight/total_weight,\n credit, if(result isa \"number\", result, award(1,result)),\n switch(\n credit=0, negative_feedback(header+\"was not passed.\"),\n credit=1, add_credit(max_credit, header+\"was passed.\"),\n add_credit(credit*max_credit, header+\"was partially passed.\")\n )\n )\n ,\n if(settings[\"show_marking_errors\"],\n negative_feedback(\"\"\"There was an error: {escape_html(r[\"stderr\"])}\"\"\")\n ,\n negative_feedback(header+\"was not passed.\")\n )\n )\n ),\n [test,r],\n zip(settings[\"tests\"],marking_results)\n)\n\nvalidation_test_feedback:\nmap(\n let([name,code], test,\n if(r[\"success\"],\n if(r[\"result\"],\n true\n ,\n warn(\"\"\"Your code failed the test {name}.\"\"\");\n fail(\"\"\"Your code failed the test {name}.\"\"\");false\n )\n ,\n warn(\"\"\"There was an error running the test {name}.\"\"\");\n fail(\"\"\"There was an error running the test {name}: {escape_html(r[\"stderr\"])}\"\"\")\n )\n ),\n [test,r],\n zip(settings[\"validation_tests\"],validation_results)\n)\n\ntotal_weight:\nsum(map(weight,[name,weight,code],settings[\"tests\"]))\n\npre_submit:\nif(studentAnswer=nothing,\n []\n,\n [run_code(code_language,\n [\n matplotlib_preamble,\n variables_as_code(language_synonym(code_language), settings[\"variables\"]),\n render(settings[\"preamble\"]),\n if(trim(settings[\"modifier\"])=\"\", studentAnswer, eval(expression(settings[\"modifier\"]))),\n render(settings[\"postamble\"]),\n matplotlib_postamble\n ]\n +map(code,[name,marks,code],settings[\"tests\"])\n +map(code,[name,code],settings[\"validation_tests\"])\n 
)]\n)\n\ncode_result:\npre_submit[\"code_result\"]\n\nmain_stdout:\nsafe(main_result[\"stdout\"])\n\ncode_language:\nsettings[\"code_language\"]\n\npreamble_result:\ncode_result[2]\n\npreamble_stderr:\npreamble_result[\"stderr\"]\n\npostamble_result:\ncode_result[4]\n\npostamble_stderr:\npostamble_result[\"stderr\"]\n\npostamble_feedback:\nassert(postamble_result[\"stdout\"]=\"\",\n feedback(\n if(settings[\"postamble_feedback_whitespace\"],\n html(\"\"\"{escape_html(postamble_result[\"stdout\"])}\"\"\")\n ,\n postamble_result[\"stdout\"]\n )\n )\n);\nassert(postamble_result[\"success\"],\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in the marking routine postamble: {escape_html(postamble_result[\"stderr\"])}\"\"\")\n ,\n fail(\"There was an error in the marking routine postamble.\")\n )\n)\n\nmatplotlib_preamble:\nif(code_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n plt.clf() \n\"\"\"),\n \"\"\n)\n\nmatplotlib_postamble:\nswitch(\ncode_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n fig = plt.gcf()\n if fig.get_axes():\n fig.savefig(sys.stdout, format='svg')\n\"\"\"),\n \"\"\n)\n\nmatplotlib_result:\ncode_result[5]\n\nmatplotlib_feedback:\nswitch(\ncode_language=\"pyodide\",\n assert(matplotlib_result[\"stdout\"]=\"\",\n feedback(matplotlib_result[\"stdout\"])\n ),\n \"\"\n)\n\n\n\nimages:\nflatten(map(\n get(r,\"images\",[]),\n r,\n code_result\n))\n\nshow_images:\nassert(len(images)=0 or not settings[\"show_stdout\"],\n feedback(\"Your code produced the following {pluralise(len(images),'image','images')}:\");\n map(\n feedback(html(x)),\n x,\n images\n )\n)", "marking_notes": [{"name": "mark", "description": "This is the main marking note. It should award credit and provide feedback based on the student's answer.", "definition": "apply(main_error);\napply(show_images);\napply(matplotlib_feedback);\napply(postamble_feedback);\napply(validation_test_feedback);\napply(marking_test_feedback)"}, {"name": "interpreted_answer", "description": "A value representing the student's answer to this part.", "definition": "studentAnswer"}, {"name": "main_result", "description": "The result of running the student's code and the preamble, without any tests.
\nNormally used to detect errors in the student's code.
", "definition": "code_result[3]"}, {"name": "marking_results", "description": "The results of running the marking tests.
", "definition": "code_result[6..(len(settings[\"tests\"])+6)]"}, {"name": "validation_results", "description": "The results of running the validation tests.
", "definition": "code_result[(len(settings[\"tests\"])+6)..len(code_result)]"}, {"name": "main_error", "description": "Show STDOUT if allowed.
\nCheck the student's code runs on its own. Fail if there was an error, and show STDERR if allowed.
", "definition": "assert(main_stdout=\"\" or not settings[\"show_stdout\"],\n feedback(\"Your code produced this output:{escape_html(main_stdout)}\")\n);\nassert(main_result[\"success\"],\n warn(\"\"\"There was an error in your code.\"\"\");\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in your code: {escape_html(main_result[\"stderr\"])}\"\"\")\n ,\n fail(\"There was an error in your code.\")\n )\n)"}, {"name": "marking_test_feedback", "description": "Feedback on the marking tests. For each test, if the test was passed then add the corresponding amount of credit. If there was an error, show the error.
", "definition": "map(\n let(\n [name,weight,code], test,\n header, \"Test: {name} \",\n if(r[\"success\"],\n let(\n result, r[\"result\"],\n max_credit, weight/total_weight,\n credit, if(result isa \"number\", result, award(1,result)),\n switch(\n credit=0, negative_feedback(header+\"was not passed.\"),\n credit=1, add_credit(max_credit, header+\"was passed.\"),\n add_credit(credit*max_credit, header+\"was partially passed.\")\n )\n )\n ,\n if(settings[\"show_marking_errors\"],\n negative_feedback(\"\"\"There was an error:{escape_html(r[\"stderr\"])}\"\"\")\n ,\n negative_feedback(header+\"was not passed.\")\n )\n )\n ),\n [test,r],\n zip(settings[\"tests\"],marking_results)\n)"}, {"name": "validation_test_feedback", "description": "Give feedback on the validation tests. If any of them are not passed, the student's answer is invalid.
", "definition": "map(\n let([name,code], test,\n if(r[\"success\"],\n if(r[\"result\"],\n true\n ,\n warn(\"\"\"Your code failed the test {name}.\"\"\");\n fail(\"\"\"Your code failed the test {name}.\"\"\");false\n )\n ,\n warn(\"\"\"There was an error running the test {name}.\"\"\");\n fail(\"\"\"There was an error running the test {name}:{escape_html(r[\"stderr\"])}\"\"\")\n )\n ),\n [test,r],\n zip(settings[\"validation_tests\"],validation_results)\n)"}, {"name": "total_weight", "description": "The sum of the weights of the marking tests. Each test's weight is divided by this to produce a proportion of the available credit.
", "definition": "sum(map(weight,[name,weight,code],settings[\"tests\"]))"}, {"name": "pre_submit", "description": "The code blocks to run.
\nIn order, they are: the matplotlib preamble, a block defining the question variables, the preamble, the student's code (possibly modified), the postamble, the matplotlib postamble, and then one block for each marking test followed by one block for each validation test.
\nThe results of the code blocks: a list with an entry corresponding to each block of code.
", "definition": "pre_submit[\"code_result\"]"}, {"name": "main_stdout", "description": "The stdout from the student's code.
", "definition": "safe(main_result[\"stdout\"])"}, {"name": "code_language", "description": "The language the code is written in. Either \"pyodide\" (Python) or \"webr\" (R)
", "definition": "settings[\"code_language\"]"}, {"name": "preamble_result", "description": "The result of running the preamble block.
", "definition": "code_result[2]"}, {"name": "preamble_stderr", "description": "The STDERR produced by the preamble block.
", "definition": "preamble_result[\"stderr\"]"}, {"name": "postamble_result", "description": "The result of running the postamble.
", "definition": "code_result[4]"}, {"name": "postamble_stderr", "description": "The STDERR produced by the postamble block.
", "definition": "postamble_result[\"stderr\"]"}, {"name": "postamble_feedback", "description": "Show the STDOUT from the postamble, if there is any.
", "definition": "assert(postamble_result[\"stdout\"]=\"\",\n feedback(\n if(settings[\"postamble_feedback_whitespace\"],\n html(\"\"\"{escape_html(postamble_result[\"stdout\"])}\"\"\")\n ,\n postamble_result[\"stdout\"]\n )\n )\n);\nassert(postamble_result[\"success\"],\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in the marking routine postamble: {escape_html(postamble_result[\"stderr\"])}\"\"\")\n ,\n fail(\"There was an error in the marking routine postamble.\")\n )\n)"}, {"name": "matplotlib_preamble", "description": "Preamble for a hack to ensure that figures produced by matplotlib in Python are displayed.
\nThis code clears the matplotlib output, if matplotlib has been loaded.
", "definition": "if(code_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n plt.clf() \n\"\"\"),\n \"\"\n)"}, {"name": "matplotlib_postamble", "description": "A hack to show any figures produced with matplotlib in the stdout.
", "definition": "switch(\ncode_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n fig = plt.gcf()\n if fig.get_axes():\n fig.savefig(sys.stdout, format='svg')\n\"\"\"),\n \"\"\n)"}, {"name": "matplotlib_result", "description": "The result of running the matplotlib hack.
", "definition": "code_result[5]"}, {"name": "matplotlib_feedback", "description": "Feedback from the matplotlib hack: if a figure is produced, it's displayed as SVG here.
", "definition": "switch(\ncode_language=\"pyodide\",\n assert(matplotlib_result[\"stdout\"]=\"\",\n feedback(matplotlib_result[\"stdout\"])\n ),\n \"\"\n)\n\n"}, {"name": "images", "description": "Any images produced by the code blocks.
", "definition": "flatten(map(\n get(r,\"images\",[]),\n r,\n code_result\n))"}, {"name": "show_images", "description": "Show the images produced by the code.
", "definition": "assert(len(images)=0 or not settings[\"show_stdout\"],\n feedback(\"Your code produced the following {pluralise(len(images),'image','images')}:\");\n map(\n feedback(html(x)),\n x,\n images\n )\n)"}], "settings": [{"name": "show_input_hint", "label": "Show the input hint?", "help_url": "", "hint": "", "input_type": "checkbox", "default_value": true}, {"name": "code_language", "label": "Code language", "help_url": "", "hint": "The language that the student's code will be written in.", "input_type": "dropdown", "default_value": "pyodide", "choices": [{"value": "pyodide", "label": "Python"}, {"value": "webr", "label": "R"}]}, {"name": "correct_answer", "label": "Correct answer", "help_url": "", "hint": "A correct answer to the part.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "correct_answer_subvars", "label": "Substitute question variables into the correct answer?", "help_url": "", "hint": "If ticked, then JME expressions between curly braces will be evaluated and substituted into the correct answer.studentAnswer.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "preamble", "label": "Preamble", "help_url": "", "hint": "This code is run before the student's code. Define anything that the student's code or your tests need.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "postamble", "label": "Postamble", "help_url": "", "hint": "This code is run after the student's code but before the validation and unit tests.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "postamble_feedback_whitespace", "label": "Format postamble output as code?", "help_url": "", "hint": "If ticked, any output produced by the postamble will be formatted in monospace font, with whitespace preserved. If not ticked, it'll be presented as prose text or HTML.", "input_type": "checkbox", "default_value": false}, {"name": "tests", "label": "Marking tests", "help_url": "", "hint": "A list of tests used to mark the student's answer.A list of tests used to validate that the student's code is acceptable.
Each item is a list with two string values: a name for the test, and the code to run.
These questions are designed to help you verify that you can successfully apply the techniques related to maximum-likelihood estimation we have learned in class, e.g., numerical and analytical derivation of estimators, construction of large-sample confidence intervals, and likelihood-ratio tests.
", "licence": "None specified"}, "statement": "This question encompasses all the material presented in Chapter 2 of the notes. The setup for the entire question is as follows:
\nLet $Y_1, \\dotsc, Y_n \\overset{\\mathrm{iid}}{\\sim} \\mathrm{Normal}(\\mu, \\gamma)$, for some $\\mu \\in \\mathbb{R}$ and $\\gamma > 0$. Assume we have observed a realisation $\\underline{x} := (x_1, \\dotsc, x_n)$ of $\\underline{X} := (X_1, \\dotsc, X_n)$, where $X_i := \\exp(Y_i)$, for $i = 1, \\dotsc, n$.
\n", "advice": "See Canvas.
", "rulesets": {}, "builtin_constants": {"e": true, "pi,\u03c0": true, "i": true, "j": false}, "constants": [], "variables": {"log_likelihood": {"name": "log_likelihood", "group": "Ungrouped variables", "definition": "safe(\"x <- c(8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1)\\n\\nlog_likelihood <- function(pars) {\\n mu <- pars[1]\\n gamma <- pars[2]\\n log_like <- sum(dlnorm(x, meanlog = mu, sdlog = sqrt(gamma), log = TRUE))\\n return(log_like)\\n}\")", "description": "", "templateType": "long plain string", "can_override": false}, "log_placeholder": {"name": "log_placeholder", "group": "Ungrouped variables", "definition": "safe(\"x <- c(8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1)\\n\\nlog_likelihood <- function(pars) {\\n #code here\\n}\")", "description": "", "templateType": "long plain string", "can_override": false}, "correct_log_likelihood": {"name": "correct_log_likelihood", "group": "Ungrouped variables", "definition": "safe(\"x <- c(8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1)\\n\\ncorrect_log_likelihood <- function(pars) {\\n mu <- pars[1]\\n gamma <- pars[2]\\n log_like <- sum(dlnorm(x, meanlog = mu, sdlog = sqrt(gamma), log = TRUE))\\n return(log_like)\\n}\")", "description": "", "templateType": "long plain string", "can_override": false}, "Advice": {"name": "Advice", "group": "Ungrouped variables", "definition": "safe(\"### Part (a):\\n\\nThere are two methods to compute this. \\n###### Manipulation of cdf:\\n$$\\n\\\\begin{align*}\\n\tF_X(x) &= \\\\text{Pr}(X < x) \\\\\\\\\\n\t&= \\\\text{Pr}(\\\\exp(Y) < x) \\\\\\\\\\n\t&= \\\\text{Pr}(Y < \\\\log(x)) \\\\\\\\\\n\t&= F_Y(\\\\log(x)).\\n\\\\end{align*}\\n$$\\nTo compute the pdf, we just take derivatives with respect to $y$ of the cdf:\\n$$\\n\\\\begin{align*}\\n\tf_X(x) &= \\\\frac{\\\\partial}{\\\\partial x} F_Y(\\\\log(x)) \\\\\\\\\\n\t&= \\\\frac{1}{x}f_Y(\\\\log(x)).\\n\\\\end{align*}\\n$$\\nSince $X\\\\sim\\\\mathcal{N}(\\\\mu,\\\\gamma)$, we have\\n$$\\n\\\\begin{align*}\\n\t f_X(x) &= \\\\frac{1}{x}\\\\left[ \\\\frac{1}{\\\\sqrt{2\\\\pi\\\\gamma}}\\\\exp\\\\left(-\\\\frac{1}{2\\\\gamma}(\\\\log(x) - \\\\mu)^2\\\\right)\\\\right] \\\\\\\\\\n\t &= \\\\frac{1}{x\\\\sqrt{2\\\\pi\\\\gamma}}\\\\exp\\\\left(-\\\\frac{1}{2\\\\gamma}(\\\\log(x) - \\\\mu)^2\\\\right).\\n\\\\end{align*}\\n$$\\n###### Density under transformation:\\nIf $Y$ is a random variable with pdf $f_Y(y)$ and \\n$$ X = g(Y), $$\\nwhere $g$ is an invertible differentiable function, then the pdf $f_Y(y)$ of $Y$ is given by the formula\\n$$ f_X(x) = f_X(g^{-1}(x))\\\\left|\\\\frac{\\\\partial g^{-1}}{\\\\partial y}(x)\\\\right|. $$\\nIn order to use this formula we need:\\n\\nThe pdf $f_X(x)$. This is\\n$$ f_X(x) = \\\\frac{1}{\\\\sqrt{2 \\\\pi \\\\gamma}} \\\\exp\\\\left[-\\\\frac{(x-\\\\mu)^2}{2\\\\gamma}\\\\right]. $$\\nThe function $g(y)$. This is\\n$$ g(y) = \\\\exp(y). $$\\nThe inverse $g^{-1}(x)$ of $g(y)$. This is\\n$$ g^{-1}(x) = \\\\log(x) $$\\nThe derivative of the inverse $\\\\frac{\\\\partial g^{-1}}{\\\\partial x}(x)$. This is\\n$$ \\\\frac{\\\\partial g^{-1}}{\\\\partial x}(x) = \\\\frac{1}{x}. 
$$\\n\\nPlugging these values in to the formula, we obtain:\\n $$\\n\\\\begin{align*}\\n\tf_X(x) &= \\\\frac{1}{\\\\sqrt{2 \\\\pi \\\\gamma}} \\\\exp\\\\left[-\\\\frac{(\\\\log x-\\\\mu)^2}{2\\\\gamma}\\\\right] \\\\left|\\\\frac{1}{x}\\\\right| \\\\\\\\\\n\t&= \\\\frac{1}{x\\\\sqrt{2\\\\gamma\\\\pi}}\\\\exp\\\\left(-\\\\frac{1}{2\\\\gamma}(\\\\log(x) - \\\\mu)^2\\\\right).\\n\\\\end{align*}\\n$$\\nHere, the absolute value $\\\\left|\\\\frac{1}{x}\\\\right| = 1/x$ since $x > 0$.\\n### Part (b):\\nThe likelihood function is\\n$$\\n\\\\begin{align*}\\nL(\\\\mu,\\\\gamma; \\\\underline{x}) &= \\\\prod_{i=1}^n \\\\frac{1}{x_i\\\\sqrt{2\\\\gamma\\\\pi}}\\\\exp\\\\left(-\\\\frac{1}{2\\\\gamma}(\\\\log(x_i) - \\\\mu)^2\\\\right) \\\\\\\\\\n&= \\\\left(\\\\prod_{i=1}^n x_i\\\\right)^{-1} (2\\\\gamma\\\\pi)^{-n/2} \\\\exp\\\\left(-\\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n(\\\\log x_i - \\\\mu)^2\\\\right) \\\\\\\\\\n&= \\\\bar{x}_g^{-n} (2\\\\gamma\\\\pi)^{-n/2} \\\\exp\\\\left(-\\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n(\\\\log x_i - \\\\mu)^2\\\\right).\\n\\\\end{align*}\\n$$\\nFor the remaining parts, we require the log-likelihood function. This is\\n$$ \\n\\\\begin{align*}\\n\\\\ell(\\\\mu,\\\\gamma;\\\\underline{x}) &= -n\\\\log \\\\bar{x}_g -\\\\frac{n}{2}\\\\log(2\\\\pi\\\\gamma) - \\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n (\\\\log x_i - \\\\mu)^2 \\\\\\\\\\n&= -\\\\sum_{i=1}^n \\\\log(x_i) -\\\\frac{n}{2}\\\\log(2\\\\pi\\\\gamma) - \\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n (\\\\log x_i - \\\\mu)^2 .\\n\\\\end{align*}\\n$$\\n### Part (e):\\nUsing the likelihood function obtained in part (b), the log-likelihood is:\\n$$ \\\\ell(\\\\mu,\\\\gamma;\\\\underline{x}) = -\\\\sum_{i=1}^n\\\\log(x_i) - \\\\frac{n}{2}\\\\log(2\\\\pi) -\\\\frac{n}{2}\\\\log(\\\\gamma) - \\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n(\\\\log x_i -\\\\mu)^2. $$\\nTherefore, the partial derivatives with respect to $\\\\mu$ and $\\\\gamma$ are:\\n$$\\\\begin{align} \\n\\\\frac{\\\\partial}{\\\\partial \\\\mu} \\\\ell(\\\\mu, \\\\gamma; \\\\underline{x}) &= \\\\frac{1}{\\\\gamma}\\\\sum_{i=1}^n(\\\\log x_i - \\\\mu), \\\\\\\\ \\n\\\\frac{\\\\partial}{\\\\partial \\\\gamma} \\\\ell(\\\\mu, \\\\gamma; \\\\underline{x}) &= -\\\\frac{n}{2\\\\gamma} + \\\\frac{1}{2\\\\gamma^2}\\\\sum_{i=1}^n(\\\\log x_i - \\\\mu)^2. \\n\\\\end{align}$$\\n\\n### Part (f):\\nWe want to find $\\\\mu$ and $\\\\gamma$ such that the following two equations are satisfied simultaneously\\n$$ \\n\\\\begin{align}\\n\\\\frac{\\\\partial}{\\\\partial \\\\mu} \\\\ell(\\\\mu, \\\\gamma; \\\\underline{x}) &=0 ,\\\\\\\\\\n\\\\frac{\\\\partial}{\\\\partial \\\\gamma} \\\\ell(\\\\mu, \\\\gamma; \\\\underline{x}) &= 0.\\n\\\\end{align}\\n$$\\nUsing the partial derivatives obtained in part (e), we first note that, since $\\\\gamma > 0$, \\n$$ \\n\\\\begin{align*}\\n\\\\sum_{i=1}^n (\\\\log x_i - \\\\mu) &= 0\\\\\\\\\\n\\\\Rightarrow \\\\mu &= \\\\frac{1}{n}\\\\sum_{i=1}^n \\\\log(x_i).\\n\\\\end{align*}\\n$$\\nThis is the value of $\\\\mu$ that will maximise the likelihood function. 
To find the associated $\\\\gamma$ value, we solve the second equation:\\n$$\\n\\\\begin{align*}\\n-\\\\frac{n}{2\\\\gamma} + \\\\frac{1}{2\\\\gamma^2}\\\\sum_{i=1}^n\\\\left(\\\\log x_i - \\\\frac{1}{n}\\\\sum_{j=1}^n \\\\log(x_i)\\\\right)^2 &= 0\\\\\\\\\\n\\\\Rightarrow \\\\gamma &= \\\\frac{1}{n}\\\\sum_{i=1}^n\\\\left(\\\\log x_i - \\\\frac{1}{n}\\\\sum_{j=1}^n \\\\log(x_i)\\\\right)^2.\\n\\\\end{align*}\\n$$\\nTherefore, these $\\\\mu$ and $\\\\gamma$ values are the values that maximise the likelihood function.\\n\\n### Part (g):\\n\\nFrom part (f), we know the formulae for the maximum likelihood estimates of $\\\\mu$ and $\\\\gamma$. The data is, $8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1$. Note that $n = 9$ and\\n$$ \\\\sum_{i=1}^n \\\\log(x_i) = 21.334 \\\\, \\\\text{ (3 d.p.).}$$\\nTherefore,\\n$$\\\\hat{\\\\mu} = \\\\frac{21.334}{9} = 2.370 \\\\, \\\\text{ (3 d.p.),} $$\\nand,\\n$$\\n\\\\begin{align*}\\n\\\\hat{\\\\gamma} &= \\\\frac{1}{9}\\\\sum_{i=1}^n\\\\left(\\\\log(x_i) - 2.370\\\\right)^2 \\\\\\\\\\n&= 0.352 \\\\, \\\\text{ (3 d.p.).}\\n\\\\end{align*}\\n$$\\n### Part (h):\\n\\nWe require the Fisher information of $\\\\gamma$. Recall that $I(\\\\gamma) = -\\\\text{E}\\\\left[\\\\frac{\\\\partial^2}{\\\\partial \\\\gamma^2} \\\\ell(2,\\\\gamma; \\\\underline{X})\\\\right]$ . The second derivative of the log-likelihood with respect to $\\\\gamma$ is:\\n\\n$$ \\\\frac{\\\\partial^2}{\\\\partial\\\\gamma^2}\\\\ell(\\\\mu,\\\\gamma; \\\\underline{x}) = \\\\frac{n}{2\\\\gamma^2} - \\\\frac{1}{\\\\gamma^3}\\\\sum_{i=1}^n(\\\\log x_i - \\\\mu)^2 $$\\n\\nWe first compute the Fisher information for $\\\\gamma$,\\n$$ \\n\\\\begin{align*}I(\\\\gamma) &= -\\\\frac{n}{2\\\\gamma^2}+\\\\frac{1}{\\\\gamma^3}\\\\sum_{i=1}^n\\\\text{E}\\\\left[\\\\log(X_i)^2 - 2\\\\mu\\\\log(X_i) + \\\\mu^2\\\\right] \\\\\\\\\\n &= -\\\\frac{n}{2\\\\gamma^2} + \\\\frac{1}{\\\\gamma^3}\\\\sum_{i=1}^n \\\\mu^2 + \\\\gamma - 2\\\\mu^2 + \\\\mu^2 \\\\\\\\\\n&= -\\\\frac{n}{2\\\\gamma^2} + \\\\frac{n}{\\\\gamma^2} = \\\\frac{n}{2\\\\gamma^2}.\\n\\\\end{align*}\\n$$\\n### Part (i):\\nWe need to find the maximum-likelihood estimate of $\\\\gamma$ when $\\\\mu=2$. This is achieved by solving:\\n\\n$$ \\n\\\\begin{align*}\\n\\\\frac{\\\\partial}{\\\\partial \\\\gamma} \\\\ell(2, \\\\gamma; \\\\underline{x}) = -\\\\frac{n}{2\\\\gamma} + \\\\frac{1}{2\\\\gamma^2}\\\\sum_{i=1}^n(\\\\log x_i - 2)^2 &= 0 \\\\\\\\\\n\\\\Rightarrow \\\\gamma &= \\\\frac{1}{n}\\\\sum_{i=1}^n (\\\\log x_i - 2)^2\\n\\\\end{align*}\\n$$\\n\\nTherefore, the asymptotic distribution is:\\n\\n$$ \\\\hat{\\\\gamma} \\\\sim \\\\mathcal{N}\\\\left(\\\\gamma, v\\\\right), $$\\nwhere\\n$$ \\nv = I(\\\\hat{\\\\gamma})^{-1} = \\\\frac{2\\\\left(\\\\sum_{i=1}^n (\\\\log(x_i) - 2)^2\\\\right)^2}{n^3}\\n$$\\nTherefore, the 95% large sample confidence interval takes the form:\\n\\n$$\\\\frac{1}{n}\\\\sum_{i=1}^n (\\\\log x_i - 2)^2 \\\\pm 1.96\\\\times\\\\frac{2^{0.5}\\\\sum_{i=1}^n(\\\\log x_i - 2)^2}{n^{3/2}}$$\\nUsing the data, $8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1$, we have\\n\\n$$ \\\\sum_{i=1}^n (\\\\log x_i - 2)^2 = 4.407\\\\, \\\\text{ (3 d.p.)}. $$\\nAlso, $n = 9$ and so the 95% large sample confidence interval takes the form $L < \\\\gamma < U$, where\\n\\n$$ L = 0.037\\\\, \\\\text{ (3 d.p.)} \\\\text{ and } U = 0.942 \\\\, \\\\text{ (3 d.p.)}. 
$$\\n### Part (j):\\n\\nThe admissible set (or parameter space) is $\\\\Theta = \\{(\\\\mu, \\\\gamma) \\\\,|\\\\, \\\\mu \\\\in \\\\mathbb{R}, \\\\gamma \\\\in \\\\mathbb{R}^+\\}$ and the null-hypothesis set is $\\\\Omega = \\{(2,\\\\gamma) \\\\,|\\\\, \\\\gamma\\\\in\\\\mathbb{R}^+\\}$. \\n\\nTherefore, the degrees of freedom of the test-statistic is $d = \\\\text{dim}(\\\\Theta) - \\\\text{dim}(\\\\Omega) = 2 - 1 = 1$.\\n\\nNext, we need to compute the test-statistic $w$. In order to do this, we need to compute\\n\\n$$ \\\\max_{\\\\gamma > 0} \\\\ell(2, \\\\gamma; \\\\underline{x}). $$\\nNote that, from part (i), the maximum likelihood estimate of $\\\\gamma$ when $\\\\mu = 2$ is \\n\\n$$ \\\\hat{\\\\gamma} = \\\\frac{1}{n}\\\\sum_{i=1}^n (\\\\log x_i - 2)^2. $$\\nSubstituting this value into the log-likelihood, we obtain\\n\\n$$\\n\\\\begin{align*}\\n \\\\ell(2,\\\\gamma;\\\\underline{x}) &= -\\\\sum_{i=1}^n\\\\log(x_i) - \\\\frac{n}{2}\\\\log(2\\\\pi) -\\\\frac{n}{2}\\\\log(\\\\gamma) - \\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n(\\\\log x_i -2)^2 \\\\\\\\\\n &= -\\\\sum_{i=1}^n\\\\log(x_i) - \\\\frac{n}{2}\\\\log(2\\\\pi) -\\\\frac{n}{2}\\\\log\\\\left(\\\\frac{1}{n}\\\\sum_{i=1}^n (\\\\log x_i - 2)^2\\\\right) - \\\\frac{n}{2} \\\\\\\\\\n &= -30.891 \\\\, \\\\text{ (3 d.p.)}.\\n \\\\end{align*}\\n $$\\nAlternatively, you could use your $\\\\texttt{log\\\\_likelihood}$ function defined in part (b) and plug in $\\\\texttt{c(2, 0.4896)}$. This is because $\\\\hat{\\\\gamma} = 0.4896 \\\\, \\\\text{ (4 d.p.)}$. \\n\\nNext, we compute the value of\\n$$ \\n\\\\max_{\\\\underline{\\\\theta}\\\\in \\\\Theta} \\\\ell(\\\\mu,\\\\gamma;\\\\underline{x}).\\n$$\\nRecall that, from part (f), the value of $\\\\mu$ and $\\\\gamma$ which maximise the log-likelihood are:\\n\\n$$ \\n\\\\begin{align*}\\n\\\\hat{\\\\mu} &= \\\\frac{1}{n}\\\\sum_{i=1}^n \\\\log x_i \\\\\\\\\\n\\\\hat{\\\\gamma} &= \\\\frac{1}{n}\\\\sum_{i=1}^n\\\\left(\\\\log(x_i) - \\\\frac{1}{n}\\\\sum_{j=1}^n \\\\log x_j\\\\right)^2\\n\\\\end{align*}\\n$$\\nSubstituting these values into the log-likelihood, we obtain:\\n$$\\n\\\\begin{align*}\\n \\\\ell(\\\\hat{\\\\mu},\\\\hat{\\\\gamma};\\\\underline{x}) &= -\\\\sum_{i=1}^n\\\\log(x_i) - \\\\frac{n}{2}\\\\log(2\\\\pi) -\\\\frac{n}{2}\\\\log(\\\\gamma) - \\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n(\\\\log x_i - \\\\mu)^2 \\\\\\\\\\n &= -\\\\sum_{i=1}^n\\\\log(x_i) - \\\\frac{n}{2}\\\\log(2\\\\pi) -\\\\frac{n}{2}\\\\log\\\\left(\\\\frac{1}{n}\\\\sum_{i=1}^n \\\\left(\\\\log x_i - \\\\frac{1}{n}\\\\sum_{j=1}^n\\\\log x_j\\\\right)^2\\\\right) - \\\\frac{n}{2} \\\\\\\\\\n &= -29.411 \\\\, \\\\text{ (3 d.p.)}.\\n \\\\end{align*}\\n$$\\nAlternatively, you could use your $\\\\texttt{log\\\\_likelihood}$ function defined in part (b) and plug in $\\\\texttt{c()}$. This is because $\\\\hat{\\\\mu} = 2$ and $\\\\hat{\\\\gamma} = 3$. \\n\\nTherefore, the value of the test-statistic is\\n\\n$$ w = -2(-30.891 - (-29.411)) = 2.960 \\\\, \\\\text{ (3 d.p.).} $$\\n\\nThe relevant value in $\\\\chi^2_d$ table is\\n$$ \\\\text{Pr}(X > 3.84) = 0.05. 
$$\\nSince $w = 2.960 < 3.84$, we fail to reject the null-hypothesis.\\n\\n\\n\")", "description": "", "templateType": "long plain string", "can_override": false}, "optimum_code": {"name": "optimum_code", "group": "Ungrouped variables", "definition": "safe(\"initial_choice <- c(-1.3, 1.7) \\n\\noptim(\\n par = initial_choice, \\n fn = log_likelihood, \\n lower = c(-Inf, 0.001), \\n upper = c(Inf, Inf),\\n method=\\'L-BFGS-B\\', \\n control = list(fnscale = -1)\\n)\")", "description": "", "templateType": "long plain string", "can_override": false}, "correct_optimum_code": {"name": "correct_optimum_code", "group": "Ungrouped variables", "definition": "safe(\"initial_choice <- c(-1.3, 1.7) \\n\\nanswer1 <- optim(\\n par = initial_choice, \\n fn = log_likelihood, \\n lower = c(-Inf, 0.001), \\n upper = c(Inf, Inf),\\n method=\\'L-BFGS-B\\', \\n control = list(fnscale = -1)\\n)\")", "description": "", "templateType": "long plain string", "can_override": false}, "optimum_placeholder": {"name": "optimum_placeholder", "group": "Ungrouped variables", "definition": "safe(\"initial_choice <- c(-1.3, 1.7) \\n\\noptim(#code here)\")", "description": "", "templateType": "long plain string", "can_override": false}}, "variablesTest": {"condition": "", "maxRuns": 100}, "ungrouped_variables": ["log_likelihood", "log_placeholder", "correct_log_likelihood", "Advice", "optimum_code", "correct_optimum_code", "optimum_placeholder"], "variable_groups": [], "functions": {}, "preamble": {"js": "", "css": ""}, "parts": [{"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Let $Y \\sim \\mathrm{Normal}(\\mu, \\gamma)$, for some $\\mu \\in \\mathbb{R}$ and $\\gamma > 0$. Which of the following is the probability density function for $X := \\exp(Y)$?
\nHint: Remember that if $X$ is a univariate real random variable with probability density function (PDF) $f_X$ and $h\\colon \\mathbb{R} \\to \\mathbb{R}$ is continuously differentiable and has an inverse $h^{-1}$, then $Z := h(X)$ has PDF $f_{Z}(z) = f_X(h^{-1}(z)) \\lvert \\frac{\\mathrm{d}}{\\mathrm{d} z} h^{-1}(z) \\rvert$, for $z \\in \\mathbb{R}$.
", "minMarks": 0, "maxMarks": 0, "shuffleChoices": true, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["$ f_X(x) = \\frac{1}{x\\sqrt{2\\gamma\\pi}}\\exp\\left(-\\frac{1}{2\\gamma}(\\log(x) - \\mu)^2\\right).$", "$ f_X(x) = \\frac{1}{\\sqrt{2\\gamma\\pi}}\\exp\\left(-\\frac{1}{2\\gamma}(\\log(x) - \\mu)^2\\right).$", "$ f_X(x) = \\frac{1}{x\\sqrt{2\\gamma\\pi}}\\exp\\left(-\\frac{1}{2\\gamma}(e^x - \\mu)^2\\right).$", "$ f_X(x) = \\frac{\\log(x)}{\\sqrt{2\\gamma\\pi}}\\exp\\left(-\\frac{1}{2\\gamma}(x - \\mu)^2\\right).$", "$ f_X(x) = \\frac{1}{\\sqrt{2\\gamma\\pi}}\\exp\\left(-\\frac{1}{\\gamma}(\\log(x) - \\mu)\\right).$"], "matrix": ["5", 0, 0, 0, 0], "distractors": ["It is this one!", "", "", "", ""]}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Let $Y_1, \\dotsc, Y_n \\overset{\\mathrm{iid}}{\\sim} \\mathrm{Normal}(\\mu, \\gamma)$, for some $\\mu \\in \\mathbb{R}$ and $\\gamma > 0$. Assume we have observed a realisation $\\underline{x} := (x_1, \\dotsc, x_n)$ of $\\underline{X} := (X_1, \\dotsc, X_n)$, where $X_i := \\exp(Y_i)$, for $i = 1, \\dotsc, n$.
\nWhich of the following functions is the likelihood function $L(\\underline{\\theta}; \\underline{x})$, where $\\underline{\\theta} = (\\mu, \\gamma)^{\\mathrm{T}}$?
\nHint: In the following, $\\bar{x}_{\\mathrm{g}} = (\\prod_{i=1}^n x_i)^{1/n}$ is the geometric mean of $x_1, \\dotsc, x_n$.
\n", "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["$L(\\underline{\\theta}; {\\underline{x}}) = \\bar{x}_{\\mathrm{g}}^{-n} (2\\gamma \\pi)^{-n/2} \\exp\\left\\{-\\frac{1}{2\\gamma}\\sum_{i=1}^n\\left(\\log x_i - \\mu\\right)^2\\right\\}$", "$L(\\underline{\\theta}; \\underline{x}) = (2\\gamma \\pi)^{-n/2} \\exp\\left\\{-\\frac{1}{2\\gamma}\\sum_{i=1}^n\\left(\\log x_i - \\mu\\right)^2\\right\\}$", "$L(\\underline{\\theta}; \\underline{x}) = (2\\gamma \\pi)^{-n/2} \\exp\\left\\{-\\frac{1}{2\\gamma}\\sum_{i=1}^n\\left(e^{x_i} - \\mu\\right)^2\\right\\}$", "$L(\\underline{\\theta}; \\underline{x}) = \\bar{x}_{\\mathrm{g}}^{n} (\\gamma \\pi)^{-n/2} \\exp\\left\\{-\\frac{1}{2\\gamma}\\sum_{i=1}^n\\left(\\log x_i - \\mu\\right)^2\\right\\}$", "$L(\\underline{\\theta}; \\underline{x}) = \\bar{x}_{\\mathrm{g}} (\\gamma \\pi)^{-n} \\exp\\left\\{-\\frac{1}{2\\gamma}\\sum_{i=1}^n\\left(\\log x_i - \\mu\\right)^2\\right\\}$"], "matrix": ["5", 0, 0, 0, 0], "distractors": ["It is this one!", "", "", "", ""]}, {"type": "mark-code-3", "useCustomName": false, "customName": "", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Write an $\\texttt{R}$ command that computes the maximum likelihood estimate of $\\underline{\\theta} = (\\mu,\\gamma)^{\\mathrm{T}}$ numerically. Your initial guess of the optimal $\\underline{\\theta}$ value should be $(-1.3, 1.7)$.
\nHint: The function $\\texttt{optim}$ will be useful.
In parts (e), (f) and (g), you will analytically compute the maximum likelihood estimate for $\\underline{\\theta} = (\\mu, \\gamma)^\\mathrm{T}$ and compare it to the value generated by your code in the previous parts.
\n\nUsing your likelihood function from part (b), and writing $\\ell(\\underline{\\theta}; \\underline{x}) = \\ell(\\mu, \\gamma; \\underline{x})$, which of the following are the correct expressions for $\\frac{\\partial}{\\partial \\mu} \\ell(\\mu, \\gamma; \\underline{x})$ and $\\frac{\\partial}{\\partial \\gamma} \\ell(\\mu, \\gamma; \\underline{x})$?
", "minMarks": 0, "maxMarks": 0, "shuffleChoices": true, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["\\begin{align}Using your partial derivatives from part (e), compute formulae for $\\mu$ and $\\gamma$ such that
\n$\\frac{\\partial}{\\partial \\mu} \\ell(\\mu, \\gamma; \\underline{x}) = 0,$
\n$\\frac{\\partial}{\\partial \\gamma} \\ell(\\mu, \\gamma; \\underline{x}) = 0.$
\nWhich of the following are the correct formulae for $\\mu$ and $\\gamma$?
", "minMarks": 0, "maxMarks": 0, "shuffleChoices": true, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["$\\mu = \\frac{1}{n}\\sum_{i=1}^n \\log x_i$Using the data $8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1$ from part (c), compute the maximum likelihood estimate of $\\mu$ and $\\gamma$ using your formulae from part (f).
\n$\\mu = $ [[0]]
\n$\\gamma = $ [[1]]
", "gaps": [{"type": "numberentry", "useCustomName": true, "customName": "mu", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minValue": "2.37047168681101", "maxValue": "2.37047168681101", "correctAnswerFraction": false, "allowFractions": false, "mustBeReduced": false, "mustBeReducedPC": 0, "displayAnswer": "2", "precisionType": "dp", "precision": "2", "precisionPartialCredit": "50", "precisionMessage": "You have not given your answer to the correct precision.", "strictPrecision": false, "showPrecisionHint": true, "notationStyles": ["plain", "en", "si-en"], "correctAnswerStyle": "plain"}, {"type": "numberentry", "useCustomName": true, "customName": "gamma", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minValue": "0.352389294367405", "maxValue": "0.352389294367405", "correctAnswerFraction": false, "allowFractions": false, "mustBeReduced": false, "mustBeReducedPC": 0, "displayAnswer": "2", "precisionType": "dp", "precision": "2", "precisionPartialCredit": "50", "precisionMessage": "You have not given your answer to the correct precision.", "strictPrecision": false, "showPrecisionHint": true, "notationStyles": ["plain", "en", "si-en"], "correctAnswerStyle": "plain"}], "sortAnswers": false}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "In this part, you will compute a large sample confidence interval for $\\gamma$ by fixing $\\mu = 2$.
\nSetting $\\mu=2$, which of the following functions is the Fisher information for $\\gamma$?
\nHint: You may use the facts that $\\mathbb{E}[\\log(X_i)] = \\mu$ and $\\mathbb{E}[\\log(X_i)^2] = \\gamma + \\mu^2$ for each $i$.
", "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["$\\mathcal{I}(\\gamma) = \\frac{n}{2\\gamma^2}$", "$\\mathcal{I}(\\gamma) = \\frac{n}{\\gamma^2}$", "$\\mathcal{I}(\\gamma) = \\frac{n}{2\\gamma^3}$", "$\\mathcal{I}(\\gamma) = \\frac{n}{\\gamma} + 2n$", "$\\mathcal{I}(\\gamma) = \\frac{2n}{\\gamma}$"], "matrix": ["5", 0, 0, 0, 0], "distractors": ["It is this one!", "", "", "", ""]}, {"type": "gapfill", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Using the data from part (c), $8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1$, compute a 95 % large sample confidence interval for $\\gamma$.
\nThe interval takes the form $[l, u]$, where
$l = $ [[0]]
and
\n$u = $ [[1]].
\nHint: Remember to set $\\mu = 2$ when computing the maximum-likelihood estimate of $\\gamma$.
", "gaps": [{"type": "numberentry", "useCustomName": true, "customName": "lower", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minValue": "0.03723561214181875", "maxValue": "0.03723561214181875", "correctAnswerFraction": false, "allowFractions": false, "mustBeReduced": false, "mustBeReducedPC": 0, "displayAnswer": "", "precisionType": "dp", "precision": "3", "precisionPartialCredit": "50", "precisionMessage": "You have not given your answer to the correct precision.", "strictPrecision": false, "showPrecisionHint": true, "notationStyles": ["plain", "en", "si-en"], "correctAnswerStyle": "plain"}, {"type": "numberentry", "useCustomName": true, "customName": "upper", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minValue": "0.942041518050176", "maxValue": "0.942041518050176", "correctAnswerFraction": false, "allowFractions": false, "mustBeReduced": false, "mustBeReducedPC": 0, "displayAnswer": "", "precisionType": "dp", "precision": "3", "precisionPartialCredit": "50", "precisionMessage": "You have not given your answer to the correct precision.", "strictPrecision": false, "showPrecisionHint": true, "notationStyles": ["plain", "en", "si-en"], "correctAnswerStyle": "plain"}], "sortAnswers": false}, {"type": "gapfill", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "In this final part, you will perform a likelihood ratio test on the parameters $\\underline{\\theta} := (\\mu, \\gamma)^\\mathrm{T}$.
\nThe null hypothesis is $H_0: \\mu = 2$.
\nWhat are the degrees of freedom $\\nu$ of the $\\chi^2_\\nu$ test statistic?
\n$\\nu = $[[0]].
\nUsing the data provided in part (c), $8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1$, compute the value of the test-statistic.
\n$\\lambda_{\\mathrm{LR}} = -2\\log\\left(\\frac{\\max_{\\underline{\\theta} \\in \\Theta_0} L(\\underline{\\theta};\\underline{x})}{\\max_{\\underline{\\theta} \\in \\Theta} L(\\underline{\\theta};\\underline{x})}\\right) =$ [[1]].
\nPerform a test at the $\\alpha = 0.05$ significance level. Do you reject $H_0$?
\n[[2]]
\n\n\n", "gaps": [{"type": "numberentry", "useCustomName": true, "customName": "df", "marks": "2", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minValue": "1", "maxValue": "1", "correctAnswerFraction": false, "allowFractions": false, "mustBeReduced": false, "mustBeReducedPC": 0, "displayAnswer": "", "showFractionHint": true, "notationStyles": ["plain", "en", "si-en"], "correctAnswerStyle": "plain"}, {"type": "numberentry", "useCustomName": true, "customName": "test_statistic", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minValue": "2.96037884052939", "maxValue": "2.96037884052939", "correctAnswerFraction": false, "allowFractions": false, "mustBeReduced": false, "mustBeReducedPC": 0, "displayAnswer": "", "precisionType": "dp", "precision": "3", "precisionPartialCredit": "50", "precisionMessage": "You have not given your answer to the correct precision.", "strictPrecision": true, "showPrecisionHint": true, "notationStyles": ["plain", "en", "si-en"], "correctAnswerStyle": "plain"}, {"type": "1_n_2", "useCustomName": true, "customName": "test", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["No", "Yes"], "matrix": ["4", 0], "distractors": ["", ""]}], "sortAnswers": false}], "partsMode": "all", "maxMarks": 0, "objectives": [], "penalties": [], "objectiveVisibility": "always", "penaltyVisibility": "always"}, {"name": "Multiple-choice questions", "extensions": [], "custom_part_types": [], "resources": [], "navigation": {"allowregen": true, "showfrontpage": false, "preventleave": false, "typeendtoleave": false}, "contributors": [{"name": "Axel Finke", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/31812/"}], "tags": [], "metadata": {"description": "These multiple-choice questions help you verify your understanding of key concepts from the lectures.
", "licence": "None specified"}, "statement": "Please answer the following multiple-choice questions. They are designed to help you verify your understanding of key concepts from the lectures.
", "advice": "", "rulesets": {}, "builtin_constants": {"e": true, "pi,\u03c0": true, "i": true, "j": false}, "constants": [], "variables": {}, "variablesTest": {"condition": "", "maxRuns": 100}, "ungrouped_variables": [], "variable_groups": [], "functions": {}, "preamble": {"js": "", "css": ""}, "parts": [{"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Which of the following statements is true?
", "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["Every unbiased estimator is consistent.", "Every consistent estimator is unbiased in the limit.", "An estimator can be biased yet consistent.", "An estimator can be unbiased but inconsistent if its variance goes to 0"], "matrix": [0, "0", "3", 0], "distractors": ["", "", "", ""]}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "
Let $\\underline{X} := (X_1, \\dotsc, X_n) \\sim f(\\theta)$, where $f(\\theta)$ is a distribution parametrised by $\\theta \\in \\mathbb{R}$ with probability density function $f(\\, \\cdot \\,; \\theta)$. Let $L(\\theta; \\underline{x})$ be the likelihood for a given realisation $\\underline{x}$ of $\\underline{X}$. Which of the following statements best describes the likelihood?
Which statement about a large-sample confidence interval for a parameter $\\theta$ based on the maximum-likelihood estimator is most accurate?
", "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["It contains the true value with probability $(1-\\alpha)$ for any realised data set.", "In repeated samples, approximately a proportion of $(1- \\alpha)$ of such intervals will contain the true value of $\\theta$.", "Its width always decreases at the rate $1 / n$.", "It is exact if the ML estimator is unbiased."], "matrix": [0, "3", 0, 0], "distractors": ["", "", "", ""]}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Let $\\underline{X} = (X_1, ..., X_n)$ be a random sample from $f_X(\\theta)$, where $f_X(\\theta)$ is a distribution -- parameterised by $\\theta \\in \\mathbb{R}$ -- with twice differentiable probability density function $f_X(\\, \\cdot \\,; \\theta)$. Let $\\hat{\\theta} = h(\\underline{X})$ denote an estimator for $\\theta$. Conditional on $\\underline{X}$, which of the following correctly specifies the Fisher information at $\\hat{\\theta}$?
", "stepsPenalty": 0, "steps": [{"type": "information", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Start by considering the Fisher information at some fixed (i.e., deterministic) value $\\theta'$ and then plug in the random variable $\\hat{\\theta} = h(\\underline{X})$ at the very end.
"}], "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["$- \\sum_{i=1}^n \\mathbb{E}\\biggl[\\frac{\\partial^2}{\\partial \\theta^2} \\log f_X(Z_i; \\theta)\\biggr]\\bigg|_{\\theta = h(\\underline{Z})}$, where $Z_1, \\dotsc, Z_n \\overset{\\mathrm{iid}}{\\sim} f_X(\\theta)$ and $\\underline{Z} := (Z_1, \\dotsc, Z_n)$.", "$- \\sum_{i=1}^n \\mathbb{E}\\biggl[\\frac{\\partial^2}{\\partial \\theta^2} \\log f_X(Z_i; \\theta)\\bigg|_{\\theta = h(\\underline{Z})}\\biggr]$, where $Z_1, \\dotsc, Z_n \\overset{\\mathrm{iid}}{\\sim} f_X(h(\\underline{X}))$ and $\\underline{Z} := (Z_1, \\dotsc, Z_n)$.", "$- \\sum_{i=1}^n \\mathbb{E}\\biggl[\\frac{\\partial^2}{\\partial \\theta^2} \\log f_X(Z_i; \\theta)\\biggr]\\bigg|_{\\theta = h(\\underline{X})}$, where $Z_1, \\dotsc, Z_n \\overset{\\mathrm{iid}}{\\sim} f_X(h(\\underline{X}))$ and $\\underline{Z} := (Z_1, \\dotsc, Z_n)$.", "$- \\sum_{i=1}^n \\mathbb{E}\\biggl[\\frac{\\partial^2}{\\partial \\theta^2} \\log f_X(Z_i; \\theta)\\biggr]\\bigg|_{\\theta = h(\\underline{X})}$, where $Z_1, \\dotsc, Z_n \\overset{\\mathrm{iid}}{\\sim} f_X(\\theta)$ and $\\underline{Z} := (Z_1, \\dotsc, Z_n)$."], "matrix": [0, 0, "5", 0], "distractors": ["", "", "", ""]}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "
Which statement best describes the intuitive meaning of the Fisher information $\\mathcal{I}(\\theta)$, where $\\theta$ is the true parameter value?
Consider some probabilistic model parameterised by $\\underline{\\theta} \\in \\Theta$ and assume that you wish to perform a likelihood-ratio test for some null hypothesis $H_0: \\underline{\\theta} \\in \\Theta_0$.
\nWhat is the number of degrees of freedom of the asymptotic chi-square distribution of the likelihood-ratio test statistic if $\\Theta = \\mathbb{R}^3$ and $\\Theta_0 = \\{0\\} \\times \\mathbb{R} \\times \\{1\\}$?
", "stepsPenalty": 0, "steps": [{"type": "information", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Think about how many free parameters you need to represent arbitrary elements of $\\Theta_0$.
"}], "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["1", "2", "3"], "matrix": [0, "3", 0], "distractors": ["", "", ""]}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Consider some probabilistic model parameterised by $\\underline{\\theta} \\in \\Theta$ and assume that you wish to perform a likelihood-ratio test for some null hypothesis $H_0: \\underline{\\theta} \\in \\Theta_0$.
\nWhat is the number of degrees of freedom of the asymptotic chi-square distribution of the likelihood-ratio test statistic if $\\Theta = \\mathbb{R}^5$ and $\\Theta_0 = \\{(\\theta_1,\\dotsc,\\theta_5)^{\\mathrm{T}} \\in \\Theta \\mid \\theta_1 = - 10 \\theta_2\\}$?
", "stepsPenalty": 0, "steps": [{"type": "information", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Think about how many free parameters you need to represent arbitrary elements of $\\Theta_0$.
"}], "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["1", "2", "3", "4", "5"], "matrix": ["3", 0, 0, "0", 0], "distractors": ["", "", "", "", ""]}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Consider some probabilistic model parameterised by $\\underline{\\theta} \\in \\Theta$ and assume that you wish to perform a likelihood-ratio test for some null hypothesis $H_0: \\underline{\\theta} \\in \\Theta_0$.
\nWhat is the number of degrees of freedom of the asymptotic chi-square distribution of the likelihood-ratio test statistic if $\\Theta = \\mathbb{R}^5$ and $\\Theta_0 = \\{(\\theta_1,\\dotsc,\\theta_5)^{\\mathrm{T}} \\in \\Theta \\mid \\theta_1 = - 10 \\theta_2, - \\theta_3 / 10 = \\theta_2, \\theta_1 = \\theta_3\\}$?
", "stepsPenalty": 0, "steps": [{"type": "information", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Think about how many free parameters you need to represent arbitrary elements of $\\Theta_0$.
"}], "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["1", "2", "3", "4", "5"], "matrix": [0, "3", 0, 0, 0], "distractors": ["", "", "", "", ""]}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Which of the following best describes the (realised) p-value in a hypothesis test for a given realisation of the data?
", "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["The (realised) p-value is the probability of a type-I error.", "If the (realised) p-value is larger than the chosen significance level, we reject the null hypothesis.", "The (realised) p-value is the probability that the null hypothesis is true.", "The (realised) p-value tells us the largest significance level at which we could have still rejected the null hypothesis for the given realisation of the data."], "matrix": [0, 0, 0, "3"], "distractors": ["", "", "", ""]}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "Assuming that the null hypothesis is correct, what is the distribution of the p-value before we have observed the realisation of the data? For simplicity, you may assume that the test statistic is continuous.
", "stepsPenalty": 0, "steps": [{"type": "information", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "For some random variable $X$ with continuous cumulative distribution function $F$ and $u \\in (0,1)$, compute the probability $\\mathbb{P}(F(X) \\leq u)$ to determine the distribution of $F(X)$ which then immediately gives you the distribution of $1 - F(X)$.
"}], "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["$\\mathrm{Uniform}(0,1)$.", "$\\mathrm{Normal}(0,1)$.", "$\\mathrm{Beta}(2, 2)$.", "$\\mathrm{Poisson}(1)$."], "matrix": ["5", 0, 0, 0], "distractors": ["", "", "", ""]}], "partsMode": "all", "maxMarks": 0, "objectives": [], "penalties": [], "objectiveVisibility": "always", "penaltyVisibility": "always"}]}], "allowPrinting": true, "navigation": {"allowregen": true, "reverse": true, "browse": true, "allowsteps": true, "showfrontpage": true, "navigatemode": "sequence", "onleave": {"action": "none", "message": ""}, "preventleave": true, "typeendtoleave": false, "startpassword": "", "autoSubmit": true, "allowAttemptDownload": false, "downloadEncryptionKey": "", "showresultspage": "oncompletion"}, "timing": {"allowPause": true, "timeout": {"action": "none", "message": ""}, "timedwarning": {"action": "none", "message": ""}}, "feedback": {"enterreviewmodeimmediately": true, "showactualmarkwhen": "inreview", "showtotalmarkwhen": "always", "showanswerstatewhen": "inreview", "showpartfeedbackmessageswhen": "inreview", "showexpectedanswerswhen": "inreview", "showadvicewhen": "inreview", "allowrevealanswer": true, "intro": "", "end_message": "", "results_options": {"printquestions": true, "printadvice": true}, "feedbackmessages": [], "reviewshowexpectedanswer": true, "showanswerstate": false, "reviewshowfeedback": true, "showactualmark": false, "showtotalmark": true, "reviewshowscore": true, "reviewshowadvice": true}, "diagnostic": {"knowledge_graph": {"topics": [], "learning_objectives": []}, "script": "diagnosys", "customScript": ""}, "contributors": [{"name": "Shweta Sharma", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/21418/"}, {"name": "Axel Finke", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/31812/"}], "extensions": ["programming"], "custom_part_types": [{"source": {"pk": 195, "author": {"name": "Christian Lawson-Perfect", "pk": 7}, "edit_page": "/part_type/195/edit"}, "name": "Code", "short_name": "mark-code-3", "description": "Mark code provided by the student by running it and a series of validation and marking tests.
\nThe validation tests are used to reject an answer if the student has misunderstood the task, for example if they haven't defined a required variable or function.
\nMarking tests check properties of the student's code. Each test awards a proportion of the available credit if it is passed.
\nYou can optionally show the student the STDOUT and/or STDERR when running their code.
\nYou can give a preamble and postamble which are run before and after the student's code, and also modify the student's code before running it.
", "help_url": "", "input_widget": "code-editor", "input_options": {"correctAnswer": "if(settings[\"correct_answer_subvars\"],\n render(settings[\"correct_answer\"])\n,\n settings[\"correct_answer\"]\n)", "hint": {"static": false, "value": "\"Write \"+capitalise(language_synonym(settings[\"code_language\"]))+\" code\""}, "language": {"static": false, "value": "language_synonym(settings[\"code_language\"])"}, "placeholder": {"static": false, "value": "if(settings[\"correct_answer_subvars\"],\n render(settings[\"placeholder\"])\n,\n settings[\"placeholder\"]\n)"}, "theme": {"static": true, "value": "textmate"}}, "can_be_gap": true, "can_be_step": true, "marking_script": "mark:\napply(main_error);\napply(show_images);\napply(matplotlib_feedback);\napply(postamble_feedback);\napply(validation_test_feedback);\napply(marking_test_feedback)\n\ninterpreted_answer:\nstudentAnswer\n\nmain_result:\ncode_result[3]\n\nmarking_results:\ncode_result[6..(len(settings[\"tests\"])+6)]\n\nvalidation_results:\ncode_result[(len(settings[\"tests\"])+6)..len(code_result)]\n\nmain_error:\nassert(main_stdout=\"\" or not settings[\"show_stdout\"],\n feedback(\"Your code produced this output:{escape_html(main_stdout)}\")\n);\nassert(main_result[\"success\"],\n warn(\"\"\"There was an error in your code.\"\"\");\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in your code: {escape_html(main_result[\"stderr\"])}\"\"\")\n ,\n fail(\"There was an error in your code.\")\n )\n)\n\nmarking_test_feedback:\nmap(\n let(\n [name,weight,code], test,\n header, \"Test: {name} \",\n if(r[\"success\"],\n let(\n result, r[\"result\"],\n max_credit, weight/total_weight,\n credit, if(result isa \"number\", result, award(1,result)),\n switch(\n credit=0, negative_feedback(header+\"was not passed.\"),\n credit=1, add_credit(max_credit, header+\"was passed.\"),\n add_credit(credit*max_credit, header+\"was partially passed.\")\n )\n )\n ,\n if(settings[\"show_marking_errors\"],\n negative_feedback(\"\"\"There was an error: {escape_html(r[\"stderr\"])}\"\"\")\n ,\n negative_feedback(header+\"was not passed.\")\n )\n )\n ),\n [test,r],\n zip(settings[\"tests\"],marking_results)\n)\n\nvalidation_test_feedback:\nmap(\n let([name,code], test,\n if(r[\"success\"],\n if(r[\"result\"],\n true\n ,\n warn(\"\"\"Your code failed the test {name}.\"\"\");\n fail(\"\"\"Your code failed the test {name}.\"\"\");false\n )\n ,\n warn(\"\"\"There was an error running the test {name}.\"\"\");\n fail(\"\"\"There was an error running the test {name}: {escape_html(r[\"stderr\"])}\"\"\")\n )\n ),\n [test,r],\n zip(settings[\"validation_tests\"],validation_results)\n)\n\ntotal_weight:\nsum(map(weight,[name,weight,code],settings[\"tests\"]))\n\npre_submit:\nif(studentAnswer=nothing,\n []\n,\n [run_code(code_language,\n [\n matplotlib_preamble,\n variables_as_code(language_synonym(code_language), settings[\"variables\"]),\n render(settings[\"preamble\"]),\n if(trim(settings[\"modifier\"])=\"\", studentAnswer, eval(expression(settings[\"modifier\"]))),\n render(settings[\"postamble\"]),\n matplotlib_postamble\n ]\n +map(code,[name,marks,code],settings[\"tests\"])\n +map(code,[name,code],settings[\"validation_tests\"])\n 
)]\n)\n\ncode_result:\npre_submit[\"code_result\"]\n\nmain_stdout:\nsafe(main_result[\"stdout\"])\n\ncode_language:\nsettings[\"code_language\"]\n\npreamble_result:\ncode_result[2]\n\npreamble_stderr:\npreamble_result[\"stderr\"]\n\npostamble_result:\ncode_result[4]\n\npostamble_stderr:\npostamble_result[\"stderr\"]\n\npostamble_feedback:\nassert(postamble_result[\"stdout\"]=\"\",\n feedback(\n if(settings[\"postamble_feedback_whitespace\"],\n html(\"\"\"{escape_html(postamble_result[\"stdout\"])}\"\"\")\n ,\n postamble_result[\"stdout\"]\n )\n )\n);\nassert(postamble_result[\"success\"],\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in the marking routine postamble: {escape_html(postamble_result[\"stderr\"])}\"\"\")\n ,\n fail(\"There was an error in the marking routine postamble.\")\n )\n)\n\nmatplotlib_preamble:\nif(code_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n plt.clf() \n\"\"\"),\n \"\"\n)\n\nmatplotlib_postamble:\nswitch(\ncode_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n fig = plt.gcf()\n if fig.get_axes():\n fig.savefig(sys.stdout, format='svg')\n\"\"\"),\n \"\"\n)\n\nmatplotlib_result:\ncode_result[5]\n\nmatplotlib_feedback:\nswitch(\ncode_language=\"pyodide\",\n assert(matplotlib_result[\"stdout\"]=\"\",\n feedback(matplotlib_result[\"stdout\"])\n ),\n \"\"\n)\n\n\n\nimages:\nflatten(map(\n get(r,\"images\",[]),\n r,\n code_result\n))\n\nshow_images:\nassert(len(images)=0 or not settings[\"show_stdout\"],\n feedback(\"Your code produced the following {pluralise(len(images),'image','images')}:\");\n map(\n feedback(html(x)),\n x,\n images\n )\n)", "marking_notes": [{"name": "mark", "description": "This is the main marking note. It should award credit and provide feedback based on the student's answer.", "definition": "apply(main_error);\napply(show_images);\napply(matplotlib_feedback);\napply(postamble_feedback);\napply(validation_test_feedback);\napply(marking_test_feedback)"}, {"name": "interpreted_answer", "description": "A value representing the student's answer to this part.", "definition": "studentAnswer"}, {"name": "main_result", "description": "The result of running the student's code and the preamble, without any tests.
\nNormally used to detect errors in the student's code.
", "definition": "code_result[3]"}, {"name": "marking_results", "description": "The results of running the marking tests.
", "definition": "code_result[6..(len(settings[\"tests\"])+6)]"}, {"name": "validation_results", "description": "The results of running the validation tests.
", "definition": "code_result[(len(settings[\"tests\"])+6)..len(code_result)]"}, {"name": "main_error", "description": "Show STDOUT if allowed.
\nCheck the student's code runs on its own. Fail if there was an error, and show STDERR if allowed.
", "definition": "assert(main_stdout=\"\" or not settings[\"show_stdout\"],\n feedback(\"Your code produced this output:{escape_html(main_stdout)}\")\n);\nassert(main_result[\"success\"],\n warn(\"\"\"There was an error in your code.\"\"\");\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in your code: {escape_html(main_result[\"stderr\"])}\"\"\")\n ,\n fail(\"There was an error in your code.\")\n )\n)"}, {"name": "marking_test_feedback", "description": "Feedback on the marking tests. For each test, if the test was passed then add the corresponding amount of credit. If there was an error, show the error.
", "definition": "map(\n let(\n [name,weight,code], test,\n header, \"Test: {name} \",\n if(r[\"success\"],\n let(\n result, r[\"result\"],\n max_credit, weight/total_weight,\n credit, if(result isa \"number\", result, award(1,result)),\n switch(\n credit=0, negative_feedback(header+\"was not passed.\"),\n credit=1, add_credit(max_credit, header+\"was passed.\"),\n add_credit(credit*max_credit, header+\"was partially passed.\")\n )\n )\n ,\n if(settings[\"show_marking_errors\"],\n negative_feedback(\"\"\"There was an error:{escape_html(r[\"stderr\"])}\"\"\")\n ,\n negative_feedback(header+\"was not passed.\")\n )\n )\n ),\n [test,r],\n zip(settings[\"tests\"],marking_results)\n)"}, {"name": "validation_test_feedback", "description": "Give feedback on the validation tests. If any of them are not passed, the student's answer is invalid.
", "definition": "map(\n let([name,code], test,\n if(r[\"success\"],\n if(r[\"result\"],\n true\n ,\n warn(\"\"\"Your code failed the test {name}.\"\"\");\n fail(\"\"\"Your code failed the test {name}.\"\"\");false\n )\n ,\n warn(\"\"\"There was an error running the test {name}.\"\"\");\n fail(\"\"\"There was an error running the test {name}:{escape_html(r[\"stderr\"])}\"\"\")\n )\n ),\n [test,r],\n zip(settings[\"validation_tests\"],validation_results)\n)"}, {"name": "total_weight", "description": "The sum of the weights of the marking tests. Each test's weight is divided by this to produce a proportion of the available credit.
", "definition": "sum(map(weight,[name,weight,code],settings[\"tests\"]))"}, {"name": "pre_submit", "description": "The code blocks to run.
\nIn order, they are:
\nthe matplotlib preamble, the question variables, the preamble, the student's code (after any modification), the postamble, the matplotlib postamble, the marking tests, and the validation tests.
", "definition": "if(studentAnswer=nothing,\n []\n,\n [run_code(code_language,\n [\n matplotlib_preamble,\n variables_as_code(language_synonym(code_language), settings[\"variables\"]),\n render(settings[\"preamble\"]),\n if(trim(settings[\"modifier\"])=\"\", studentAnswer, eval(expression(settings[\"modifier\"]))),\n render(settings[\"postamble\"]),\n matplotlib_postamble\n ]\n +map(code,[name,marks,code],settings[\"tests\"])\n +map(code,[name,code],settings[\"validation_tests\"])\n )]\n)"}, {"name": "code_result", "description": "The results of the code blocks: a list with an entry corresponding to each block of code.
", "definition": "pre_submit[\"code_result\"]"}, {"name": "main_stdout", "description": "The stdout from the student's code.
", "definition": "safe(main_result[\"stdout\"])"}, {"name": "code_language", "description": "The language the code is written in. Either \"pyodide\" (Python) or \"webr\" (R)
", "definition": "settings[\"code_language\"]"}, {"name": "preamble_result", "description": "The result of running the preamble block.
", "definition": "code_result[2]"}, {"name": "preamble_stderr", "description": "The STDERR produced by the preamble block.
", "definition": "preamble_result[\"stderr\"]"}, {"name": "postamble_result", "description": "The result of running the postamble.
", "definition": "code_result[4]"}, {"name": "postamble_stderr", "description": "The STDERR produced by the postamble block.
", "definition": "postamble_result[\"stderr\"]"}, {"name": "postamble_feedback", "description": "Show the STDOUT from the postamble, if there is any.
", "definition": "assert(postamble_result[\"stdout\"]=\"\",\n feedback(\n if(settings[\"postamble_feedback_whitespace\"],\n html(\"\"\"{escape_html(postamble_result[\"stdout\"])}\"\"\")\n ,\n postamble_result[\"stdout\"]\n )\n )\n);\nassert(postamble_result[\"success\"],\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in the marking routine postamble: {escape_html(postamble_result[\"stderr\"])}\"\"\")\n ,\n fail(\"There was an error in the marking routine postamble.\")\n )\n)"}, {"name": "matplotlib_preamble", "description": "Preamble for a hack to ensure that figures produced by matplotlib in Python are displayed.
\nThis code clears the matplotlib output, if matplotlib has been loaded.
", "definition": "if(code_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n plt.clf() \n\"\"\"),\n \"\"\n)"}, {"name": "matplotlib_postamble", "description": "A hack to show any figures produced with matplotlib in the stdout.
", "definition": "switch(\ncode_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n fig = plt.gcf()\n if fig.get_axes():\n fig.savefig(sys.stdout, format='svg')\n\"\"\"),\n \"\"\n)"}, {"name": "matplotlib_result", "description": "The result of running the matplotlib hack.
", "definition": "code_result[5]"}, {"name": "matplotlib_feedback", "description": "Feedback from the matplotlib hack: if a figure is produced, it's displayed as SVG here.
", "definition": "switch(\ncode_language=\"pyodide\",\n assert(matplotlib_result[\"stdout\"]=\"\",\n feedback(matplotlib_result[\"stdout\"])\n ),\n \"\"\n)\n\n"}, {"name": "images", "description": "Any images produced by the code blocks.
", "definition": "flatten(map(\n get(r,\"images\",[]),\n r,\n code_result\n))"}, {"name": "show_images", "description": "Show the images produced by the code.
", "definition": "assert(len(images)=0 or not settings[\"show_stdout\"],\n feedback(\"Your code produced the following {pluralise(len(images),'image','images')}:\");\n map(\n feedback(html(x)),\n x,\n images\n )\n)"}], "settings": [{"name": "show_input_hint", "label": "Show the input hint?", "help_url": "", "hint": "", "input_type": "checkbox", "default_value": true}, {"name": "code_language", "label": "Code language", "help_url": "", "hint": "The language that the student's code will be written in.", "input_type": "dropdown", "default_value": "pyodide", "choices": [{"value": "pyodide", "label": "Python"}, {"value": "webr", "label": "R"}]}, {"name": "correct_answer", "label": "Correct answer", "help_url": "", "hint": "A correct answer to the part.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "correct_answer_subvars", "label": "Substitute question variables into the correct answer?", "help_url": "", "hint": "If ticked, then JME expressions between curly braces will be evaluated and substituted into the correct answer.studentAnswer.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "preamble", "label": "Preamble", "help_url": "", "hint": "This code is run before the student's code. Define anything that the student's code or your tests need.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "postamble", "label": "Postamble", "help_url": "", "hint": "This code is run after the student's code but before the validation and unit tests.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "postamble_feedback_whitespace", "label": "Format postamble output as code?", "help_url": "", "hint": "If ticked, any output produced by the postamble will be formatted in monospace font, with whitespace preserved. If not ticked, it'll be presented as prose text or HTML.", "input_type": "checkbox", "default_value": false}, {"name": "tests", "label": "Marking tests", "help_url": "", "hint": "A list of tests used to mark the student's answer.A list of tests used to validate that the student's code is acceptable.
Each item is a list with two string values: