// Numbas version: finer_feedback_settings {"name": "Application of maximum-likelihood estimation", "extensions": ["programming"], "custom_part_types": [{"source": {"pk": 195, "author": {"name": "Christian Lawson-Perfect", "pk": 7}, "edit_page": "/part_type/195/edit"}, "name": "Code", "short_name": "mark-code-3", "description": "

Mark code provided by the student by running it and a series of validation and marking tests.

\n

The validation tests are used to reject an answer if the student has misunderstood the task, for example if they haven't defined a required variable or function.

\n

Marking tests check properties of the student's code. Each test awards a proportion of the available credit if it is passed.

\n

You can optionally show the student the STDOUT and/or STDERR when running their code.

\n

You can give a preamble and postamble which are run before and after the student's code, and also modify the student's code before running it.

", "help_url": "", "input_widget": "code-editor", "input_options": {"correctAnswer": "if(settings[\"correct_answer_subvars\"],\n render(settings[\"correct_answer\"])\n,\n settings[\"correct_answer\"]\n)", "hint": {"static": false, "value": "\"Write \"+capitalise(language_synonym(settings[\"code_language\"]))+\" code\""}, "language": {"static": false, "value": "language_synonym(settings[\"code_language\"])"}, "placeholder": {"static": false, "value": "if(settings[\"correct_answer_subvars\"],\n render(settings[\"placeholder\"])\n,\n settings[\"placeholder\"]\n)"}, "theme": {"static": true, "value": "textmate"}}, "can_be_gap": true, "can_be_step": true, "marking_script": "mark:\napply(main_error);\napply(show_images);\napply(matplotlib_feedback);\napply(postamble_feedback);\napply(validation_test_feedback);\napply(marking_test_feedback)\n\ninterpreted_answer:\nstudentAnswer\n\nmain_result:\ncode_result[3]\n\nmarking_results:\ncode_result[6..(len(settings[\"tests\"])+6)]\n\nvalidation_results:\ncode_result[(len(settings[\"tests\"])+6)..len(code_result)]\n\nmain_error:\nassert(main_stdout=\"\" or not settings[\"show_stdout\"],\n feedback(\"Your code produced this output:
{escape_html(main_stdout)}
\")\n);\nassert(main_result[\"success\"],\n warn(\"\"\"There was an error in your code.\"\"\");\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in your code:
{escape_html(main_result[\"stderr\"])}
\"\"\")\n ,\n fail(\"There was an error in your code.\")\n )\n)\n\nmarking_test_feedback:\nmap(\n let(\n [name,weight,code], test,\n header, \"Test: {name} \",\n if(r[\"success\"],\n let(\n result, r[\"result\"],\n max_credit, weight/total_weight,\n credit, if(result isa \"number\", result, award(1,result)),\n switch(\n credit=0, negative_feedback(header+\"was not passed.\"),\n credit=1, add_credit(max_credit, header+\"was passed.\"),\n add_credit(credit*max_credit, header+\"was partially passed.\")\n )\n )\n ,\n if(settings[\"show_marking_errors\"],\n negative_feedback(\"\"\"There was an error:
{escape_html(r[\"stderr\"])}
\"\"\")\n ,\n negative_feedback(header+\"was not passed.\")\n )\n )\n ),\n [test,r],\n zip(settings[\"tests\"],marking_results)\n)\n\nvalidation_test_feedback:\nmap(\n let([name,code], test,\n if(r[\"success\"],\n if(r[\"result\"],\n true\n ,\n warn(\"\"\"Your code failed the test {name}.\"\"\");\n fail(\"\"\"Your code failed the test {name}.\"\"\");false\n )\n ,\n warn(\"\"\"There was an error running the test {name}.\"\"\");\n fail(\"\"\"There was an error running the test {name}:
{escape_html(r[\"stderr\"])}
\"\"\")\n )\n ),\n [test,r],\n zip(settings[\"validation_tests\"],validation_results)\n)\n\ntotal_weight:\nsum(map(weight,[name,weight,code],settings[\"tests\"]))\n\npre_submit:\nif(studentAnswer=nothing,\n []\n,\n [run_code(code_language,\n [\n matplotlib_preamble,\n variables_as_code(language_synonym(code_language), settings[\"variables\"]),\n render(settings[\"preamble\"]),\n if(trim(settings[\"modifier\"])=\"\", studentAnswer, eval(expression(settings[\"modifier\"]))),\n render(settings[\"postamble\"]),\n matplotlib_postamble\n ]\n +map(code,[name,marks,code],settings[\"tests\"])\n +map(code,[name,code],settings[\"validation_tests\"])\n )]\n)\n\ncode_result:\npre_submit[\"code_result\"]\n\nmain_stdout:\nsafe(main_result[\"stdout\"])\n\ncode_language:\nsettings[\"code_language\"]\n\npreamble_result:\ncode_result[2]\n\npreamble_stderr:\npreamble_result[\"stderr\"]\n\npostamble_result:\ncode_result[4]\n\npostamble_stderr:\npostamble_result[\"stderr\"]\n\npostamble_feedback:\nassert(postamble_result[\"stdout\"]=\"\",\n feedback(\n if(settings[\"postamble_feedback_whitespace\"],\n html(\"\"\"
{escape_html(postamble_result[\"stdout\"])}
\"\"\")\n ,\n postamble_result[\"stdout\"]\n )\n )\n);\nassert(postamble_result[\"success\"],\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in the marking routine postamble:
{escape_html(postamble_result[\"stderr\"])}
\"\"\")\n ,\n fail(\"There was an error in the marking routine postamble.\")\n )\n)\n\nmatplotlib_preamble:\nif(code_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n plt.clf() \n\"\"\"),\n \"\"\n)\n\nmatplotlib_postamble:\nswitch(\ncode_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n fig = plt.gcf()\n if fig.get_axes():\n fig.savefig(sys.stdout, format='svg')\n\"\"\"),\n \"\"\n)\n\nmatplotlib_result:\ncode_result[5]\n\nmatplotlib_feedback:\nswitch(\ncode_language=\"pyodide\",\n assert(matplotlib_result[\"stdout\"]=\"\",\n feedback(matplotlib_result[\"stdout\"])\n ),\n \"\"\n)\n\n\n\nimages:\nflatten(map(\n get(r,\"images\",[]),\n r,\n code_result\n))\n\nshow_images:\nassert(len(images)=0 or not settings[\"show_stdout\"],\n feedback(\"Your code produced the following {pluralise(len(images),'image','images')}:\");\n map(\n feedback(html(x)),\n x,\n images\n )\n)", "marking_notes": [{"name": "mark", "description": "This is the main marking note. It should award credit and provide feedback based on the student's answer.", "definition": "apply(main_error);\napply(show_images);\napply(matplotlib_feedback);\napply(postamble_feedback);\napply(validation_test_feedback);\napply(marking_test_feedback)"}, {"name": "interpreted_answer", "description": "A value representing the student's answer to this part.", "definition": "studentAnswer"}, {"name": "main_result", "description": "

The result of running the student's code and the preamble, without any tests.

\n

Normally used to detect errors in the student's code.

", "definition": "code_result[3]"}, {"name": "marking_results", "description": "

The results of running the marking tests.

", "definition": "code_result[6..(len(settings[\"tests\"])+6)]"}, {"name": "validation_results", "description": "

The results of running the validation tests.

", "definition": "code_result[(len(settings[\"tests\"])+6)..len(code_result)]"}, {"name": "main_error", "description": "

Show STDOUT if allowed.

\n

Check the student's code runs on its own. Fail if there was an error, and show STDERR if allowed.

", "definition": "assert(main_stdout=\"\" or not settings[\"show_stdout\"],\n feedback(\"Your code produced this output:
{escape_html(main_stdout)}
\")\n);\nassert(main_result[\"success\"],\n warn(\"\"\"There was an error in your code.\"\"\");\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in your code:
{escape_html(main_result[\"stderr\"])}
\"\"\")\n ,\n fail(\"There was an error in your code.\")\n )\n)"}, {"name": "marking_test_feedback", "description": "

Feedback on the marking tests. For each test, if the test was passed then add the corresponding amount of credit. If there was an error, show the error.

", "definition": "map(\n let(\n [name,weight,code], test,\n header, \"Test: {name} \",\n if(r[\"success\"],\n let(\n result, r[\"result\"],\n max_credit, weight/total_weight,\n credit, if(result isa \"number\", result, award(1,result)),\n switch(\n credit=0, negative_feedback(header+\"was not passed.\"),\n credit=1, add_credit(max_credit, header+\"was passed.\"),\n add_credit(credit*max_credit, header+\"was partially passed.\")\n )\n )\n ,\n if(settings[\"show_marking_errors\"],\n negative_feedback(\"\"\"There was an error:
{escape_html(r[\"stderr\"])}
\"\"\")\n ,\n negative_feedback(header+\"was not passed.\")\n )\n )\n ),\n [test,r],\n zip(settings[\"tests\"],marking_results)\n)"}, {"name": "validation_test_feedback", "description": "

Give feedback on the validation tests. If any of them are not passed, the student's answer is invalid.

", "definition": "map(\n let([name,code], test,\n if(r[\"success\"],\n if(r[\"result\"],\n true\n ,\n warn(\"\"\"Your code failed the test {name}.\"\"\");\n fail(\"\"\"Your code failed the test {name}.\"\"\");false\n )\n ,\n warn(\"\"\"There was an error running the test {name}.\"\"\");\n fail(\"\"\"There was an error running the test {name}:
{escape_html(r[\"stderr\"])}
\"\"\")\n )\n ),\n [test,r],\n zip(settings[\"validation_tests\"],validation_results)\n)"}, {"name": "total_weight", "description": "

The sum of the weights of the marking tests. Each test's weight is divided by this to produce a proportion of the available credit.

", "definition": "sum(map(weight,[name,weight,code],settings[\"tests\"]))"}, {"name": "pre_submit", "description": "

The code blocks to run.

\n

In order, they are:

\n", "definition": "if(studentAnswer=nothing,\n []\n,\n [run_code(code_language,\n [\n matplotlib_preamble,\n variables_as_code(language_synonym(code_language), settings[\"variables\"]),\n render(settings[\"preamble\"]),\n if(trim(settings[\"modifier\"])=\"\", studentAnswer, eval(expression(settings[\"modifier\"]))),\n render(settings[\"postamble\"]),\n matplotlib_postamble\n ]\n +map(code,[name,marks,code],settings[\"tests\"])\n +map(code,[name,code],settings[\"validation_tests\"])\n )]\n)"}, {"name": "code_result", "description": "

The results of the code blocks: a list with an entry corresponding to each block of code.

", "definition": "pre_submit[\"code_result\"]"}, {"name": "main_stdout", "description": "

The stdout from the student's code.

", "definition": "safe(main_result[\"stdout\"])"}, {"name": "code_language", "description": "

The language the code is written in. Either \"pyodide\" (Python) or \"webr\" (R)

", "definition": "settings[\"code_language\"]"}, {"name": "preamble_result", "description": "

The result of running the preamble block.

", "definition": "code_result[2]"}, {"name": "preamble_stderr", "description": "

The STDERR produced by the preamble block.

", "definition": "preamble_result[\"stderr\"]"}, {"name": "postamble_result", "description": "

The result of running the postamble.

", "definition": "code_result[4]"}, {"name": "postamble_stderr", "description": "

The STDERR produced by the postamble block.

", "definition": "postamble_result[\"stderr\"]"}, {"name": "postamble_feedback", "description": "

Show the STDOUT from the postamble, if there is any.

", "definition": "assert(postamble_result[\"stdout\"]=\"\",\n feedback(\n if(settings[\"postamble_feedback_whitespace\"],\n html(\"\"\"
{escape_html(postamble_result[\"stdout\"])}
\"\"\")\n ,\n postamble_result[\"stdout\"]\n )\n )\n);\nassert(postamble_result[\"success\"],\n if(settings[\"show_stderr\"],\n fail(\"\"\"There was an error in the marking routine postamble:
{escape_html(postamble_result[\"stderr\"])}
\"\"\")\n ,\n fail(\"There was an error in the marking routine postamble.\")\n )\n)"}, {"name": "matplotlib_preamble", "description": "

Preamble for a hack to ensure that figures produced by matplotlib in Python are displayed.

\n

This code clears the matplotlib output, if matplotlib has been loaded.

", "definition": "if(code_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n plt.clf() \n\"\"\"),\n \"\"\n)"}, {"name": "matplotlib_postamble", "description": "

A hack to show any figures produced with matplotlib in the stdout.

", "definition": "switch(\ncode_language=\"pyodide\",\n safe(\"\"\"\nimport sys\nif 'matplotlib' in sys.modules:\n import matplotlib.pyplot as plt\n fig = plt.gcf()\n if fig.get_axes():\n fig.savefig(sys.stdout, format='svg')\n\"\"\"),\n \"\"\n)"}, {"name": "matplotlib_result", "description": "

The result of running the matplotlib hack.

", "definition": "code_result[5]"}, {"name": "matplotlib_feedback", "description": "

Feedback from the matplotlib hack: if a figure is produced, it's displayed as SVG here.

", "definition": "switch(\ncode_language=\"pyodide\",\n assert(matplotlib_result[\"stdout\"]=\"\",\n feedback(matplotlib_result[\"stdout\"])\n ),\n \"\"\n)\n\n"}, {"name": "images", "description": "

Any images produced by the code blocks.

", "definition": "flatten(map(\n get(r,\"images\",[]),\n r,\n code_result\n))"}, {"name": "show_images", "description": "

Show the images produced by the code.

", "definition": "assert(len(images)=0 or not settings[\"show_stdout\"],\n feedback(\"Your code produced the following {pluralise(len(images),'image','images')}:\");\n map(\n feedback(html(x)),\n x,\n images\n )\n)"}], "settings": [{"name": "show_input_hint", "label": "Show the input hint?", "help_url": "", "hint": "", "input_type": "checkbox", "default_value": true}, {"name": "code_language", "label": "Code language", "help_url": "", "hint": "The language that the student's code will be written in.", "input_type": "dropdown", "default_value": "pyodide", "choices": [{"value": "pyodide", "label": "Python"}, {"value": "webr", "label": "R"}]}, {"name": "correct_answer", "label": "Correct answer", "help_url": "", "hint": "A correct answer to the part.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "correct_answer_subvars", "label": "Substitute question variables into the correct answer?", "help_url": "", "hint": "If ticked, then JME expressions between curly braces will be evaluated and substituted into the correct answer.

If not ticked, then the correct answer will be displayed exactly as it is.", "input_type": "checkbox", "default_value": true}, {"name": "show_stdout", "label": "Show stdout?", "help_url": "", "hint": "If ticked, the STDOUT produced after running the student's code will be shown in the feedback.", "input_type": "checkbox", "default_value": true}, {"name": "show_stderr", "label": "Show stderr?", "help_url": "", "hint": "If ticked, the STDERR produced after running the student's code will be shown in the feedback.", "input_type": "checkbox", "default_value": true}, {"name": "show_marking_errors", "label": "Show errors produced by marking tests?", "help_url": "", "hint": "", "input_type": "checkbox", "default_value": false}, {"name": "placeholder", "label": "Placeholder", "help_url": "", "hint": "Initial text for the code editor", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "modifier", "label": "Student code modifier", "help_url": "", "hint": "JME expression to modify the student's submitted code before being passed to the marking template. The student's code is available as the string variable studentAnswer.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "preamble", "label": "Preamble", "help_url": "", "hint": "This code is run before the student's code. Define anything that the student's code or your tests need.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "postamble", "label": "Postamble", "help_url": "", "hint": "This code is run after the student's code but before the validation and unit tests.", "input_type": "code", "default_value": "", "evaluate": false}, {"name": "postamble_feedback_whitespace", "label": "Format postamble output as code?", "help_url": "", "hint": "If ticked, any output produced by the postamble will be formatted in monospace font, with whitespace preserved. 
If not ticked, it'll be presented as prose text or HTML.", "input_type": "checkbox", "default_value": false}, {"name": "tests", "label": "Marking tests", "help_url": "", "hint": "A list of tests used to mark the student's answer.
Each item is a list with three values:
\n", "input_type": "code", "default_value": "[\n [\"Test 1\", 1, \"True\"]\n]", "evaluate": true}, {"name": "validation_tests", "label": "Validation tests", "help_url": "", "hint": "

A list of tests used to validate that the student's code is acceptable.
Each item is a list with two string values:

\n", "input_type": "code", "default_value": "[\n [\"arithmetic works\", \"1+1 == 2\"]\n]", "evaluate": true}, {"name": "variables", "label": "Variables to include in code", "help_url": "", "hint": "Give a dictionary mapping variable names to their values. These variables will be available in the code that is run.", "input_type": "code", "default_value": "dict()", "evaluate": true}], "public_availability": "always", "published": true, "extensions": ["programming"]}], "resources": [["question-resources/chi2table.png", "resources/question-resources/chi2table.png"]], "navigation": {"allowregen": true, "showfrontpage": false, "preventleave": false, "typeendtoleave": false}, "question_groups": [{"pickingStrategy": "all-ordered", "questions": [{"name": "Application of maximum-likelihood estimation", "tags": [], "metadata": {"description": "

These questions are designed to help you verify that you can successfully apply the techniques related to maximum-likelihood estimation we have learned in class, e.g., numerical and analytical derivation of estimators, construction of large-sample confidence intervals, and likelihood-ratio tests.

", "licence": "None specified"}, "statement": "

This question encompasses all the material presented in Chapter 2 of the notes. The setup for the entire question is as follows:

\n

Let $Y_1, \\dotsc, Y_n \\overset{\\mathrm{iid}}{\\sim} \\mathrm{Normal}(\\mu, \\gamma)$, for some $\\mu \\in \\mathbb{R}$ and $\\gamma > 0$. Assume we have observed a realisation $\\underline{x} := (x_1, \\dotsc, x_n)$ of $\\underline{X} := (X_1, \\dotsc, X_n)$, where $X_i := \\exp(Y_i)$, for $i = 1, \\dotsc, n$.

\n

", "advice": "

See Canvas.

", "rulesets": {}, "extensions": ["programming"], "builtin_constants": {"e": true, "pi,\u03c0": true, "i": true, "j": false}, "constants": [], "variables": {"log_likelihood": {"name": "log_likelihood", "group": "Ungrouped variables", "definition": "safe(\"x <- c(8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1)\\n\\nlog_likelihood <- function(pars) {\\n mu <- pars[1]\\n gamma <- pars[2]\\n log_like <- sum(dlnorm(x, meanlog = mu, sdlog = sqrt(gamma), log = TRUE))\\n return(log_like)\\n}\")", "description": "", "templateType": "long plain string", "can_override": false}, "log_placeholder": {"name": "log_placeholder", "group": "Ungrouped variables", "definition": "safe(\"x <- c(8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1)\\n\\nlog_likelihood <- function(pars) {\\n #code here\\n}\")", "description": "", "templateType": "long plain string", "can_override": false}, "correct_log_likelihood": {"name": "correct_log_likelihood", "group": "Ungrouped variables", "definition": "safe(\"x <- c(8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1)\\n\\ncorrect_log_likelihood <- function(pars) {\\n mu <- pars[1]\\n gamma <- pars[2]\\n log_like <- sum(dlnorm(x, meanlog = mu, sdlog = sqrt(gamma), log = TRUE))\\n return(log_like)\\n}\")", "description": "", "templateType": "long plain string", "can_override": false}, "Advice": {"name": "Advice", "group": "Ungrouped variables", "definition": "safe(\"### Part (a):\\n\\nThere are two methods to compute this. 
\\n###### Manipulation of cdf:\\n$$\\n\\\\begin{align*}\\n	F_X(x) &= \\\\text{Pr}(X < x) \\\\\\\\\\n	&= \\\\text{Pr}(\\\\exp(Y) < x) \\\\\\\\\\n	&= \\\\text{Pr}(Y < \\\\log(x)) \\\\\\\\\\n	&= F_Y(\\\\log(x)).\\n\\\\end{align*}\\n$$\\nTo compute the pdf, we just take derivatives with respect to $x$ of the cdf:\\n$$\\n\\\\begin{align*}\\n	f_X(x) &= \\\\frac{\\\\partial}{\\\\partial x} F_Y(\\\\log(x)) \\\\\\\\\\n	&= \\\\frac{1}{x}f_Y(\\\\log(x)).\\n\\\\end{align*}\\n$$\\nSince $Y\\\\sim\\\\mathcal{N}(\\\\mu,\\\\gamma)$, we have\\n$$\\n\\\\begin{align*}\\n	 f_X(x) &= \\\\frac{1}{x}\\\\left[ \\\\frac{1}{\\\\sqrt{2\\\\pi\\\\gamma}}\\\\exp\\\\left(-\\\\frac{1}{2\\\\gamma}(\\\\log(x) - \\\\mu)^2\\\\right)\\\\right] \\\\\\\\\\n	 &= \\\\frac{1}{x\\\\sqrt{2\\\\pi\\\\gamma}}\\\\exp\\\\left(-\\\\frac{1}{2\\\\gamma}(\\\\log(x) - \\\\mu)^2\\\\right).\\n\\\\end{align*}\\n$$\\n###### Density under transformation:\\nIf $Y$ is a random variable with pdf $f_Y(y)$ and \\n$$ X = g(Y), $$\\nwhere $g$ is an invertible differentiable function, then the pdf $f_X(x)$ of $X$ is given by the formula\\n$$ f_X(x) = f_Y(g^{-1}(x))\\\\left|\\\\frac{\\\\partial g^{-1}}{\\\\partial x}(x)\\\\right|. $$\\nIn order to use this formula we need:\\n\\nThe pdf $f_Y(y)$. This is\\n$$ f_Y(y) = \\\\frac{1}{\\\\sqrt{2 \\\\pi \\\\gamma}} \\\\exp\\\\left[-\\\\frac{(y-\\\\mu)^2}{2\\\\gamma}\\\\right]. $$\\nThe function $g(y)$. This is\\n$$ g(y) = \\\\exp(y). $$\\nThe inverse $g^{-1}(x)$ of $g(y)$. This is\\n$$ g^{-1}(x) = \\\\log(x) $$\\nThe derivative of the inverse $\\\\frac{\\\\partial g^{-1}}{\\\\partial x}(x)$. This is\\n$$ \\\\frac{\\\\partial g^{-1}}{\\\\partial x}(x) = \\\\frac{1}{x}. 
$$\\n\\nPlugging these values in to the formula, we obtain:\\n $$\\n\\\\begin{align*}\\n\tf_X(x) &= \\\\frac{1}{\\\\sqrt{2 \\\\pi \\\\gamma}} \\\\exp\\\\left[-\\\\frac{(\\\\log x-\\\\mu)^2}{2\\\\gamma}\\\\right] \\\\left|\\\\frac{1}{x}\\\\right| \\\\\\\\\\n\t&= \\\\frac{1}{x\\\\sqrt{2\\\\gamma\\\\pi}}\\\\exp\\\\left(-\\\\frac{1}{2\\\\gamma}(\\\\log(x) - \\\\mu)^2\\\\right).\\n\\\\end{align*}\\n$$\\nHere, the absolute value $\\\\left|\\\\frac{1}{x}\\\\right| = 1/x$ since $x > 0$.\\n### Part (b):\\nThe likelihood function is\\n$$\\n\\\\begin{align*}\\nL(\\\\mu,\\\\gamma; \\\\underline{x}) &= \\\\prod_{i=1}^n \\\\frac{1}{x_i\\\\sqrt{2\\\\gamma\\\\pi}}\\\\exp\\\\left(-\\\\frac{1}{2\\\\gamma}(\\\\log(x_i) - \\\\mu)^2\\\\right) \\\\\\\\\\n&= \\\\left(\\\\prod_{i=1}^n x_i\\\\right)^{-1} (2\\\\gamma\\\\pi)^{-n/2} \\\\exp\\\\left(-\\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n(\\\\log x_i - \\\\mu)^2\\\\right) \\\\\\\\\\n&= \\\\bar{x}_g^{-n} (2\\\\gamma\\\\pi)^{-n/2} \\\\exp\\\\left(-\\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n(\\\\log x_i - \\\\mu)^2\\\\right).\\n\\\\end{align*}\\n$$\\nFor the remaining parts, we require the log-likelihood function. This is\\n$$ \\n\\\\begin{align*}\\n\\\\ell(\\\\mu,\\\\gamma;\\\\underline{x}) &= -n\\\\log \\\\bar{x}_g -\\\\frac{n}{2}\\\\log(2\\\\pi\\\\gamma) - \\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n (\\\\log x_i - \\\\mu)^2 \\\\\\\\\\n&= -\\\\sum_{i=1}^n \\\\log(x_i) -\\\\frac{n}{2}\\\\log(2\\\\pi\\\\gamma) - \\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n (\\\\log x_i - \\\\mu)^2 .\\n\\\\end{align*}\\n$$\\n### Part (e):\\nUsing the likelihood function obtained in part (b), the log-likelihood is:\\n$$ \\\\ell(\\\\mu,\\\\gamma;\\\\underline{x}) = -\\\\sum_{i=1}^n\\\\log(x_i) - \\\\frac{n}{2}\\\\log(2\\\\pi) -\\\\frac{n}{2}\\\\log(\\\\gamma) - \\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n(\\\\log x_i -\\\\mu)^2. 
$$\\nTherefore, the partial derivatives with respect to $\\\\mu$ and $\\\\gamma$ are:\\n$$\\\\begin{align} \\n\\\\frac{\\\\partial}{\\\\partial \\\\mu} \\\\ell(\\\\mu, \\\\gamma; \\\\underline{x}) &= \\\\frac{1}{\\\\gamma}\\\\sum_{i=1}^n(\\\\log x_i - \\\\mu), \\\\\\\\ \\n\\\\frac{\\\\partial}{\\\\partial \\\\gamma} \\\\ell(\\\\mu, \\\\gamma; \\\\underline{x}) &= -\\\\frac{n}{2\\\\gamma} + \\\\frac{1}{2\\\\gamma^2}\\\\sum_{i=1}^n(\\\\log x_i - \\\\mu)^2. \\n\\\\end{align}$$\\n\\n### Part (f):\\nWe want to find $\\\\mu$ and $\\\\gamma$ such that the following two equations are satisfied simultaneously\\n$$ \\n\\\\begin{align}\\n\\\\frac{\\\\partial}{\\\\partial \\\\mu} \\\\ell(\\\\mu, \\\\gamma; \\\\underline{x}) &=0 ,\\\\\\\\\\n\\\\frac{\\\\partial}{\\\\partial \\\\gamma} \\\\ell(\\\\mu, \\\\gamma; \\\\underline{x}) &= 0.\\n\\\\end{align}\\n$$\\nUsing the partial derivatives obtained in part (e), we first note that, since $\\\\gamma > 0$, \\n$$ \\n\\\\begin{align*}\\n\\\\sum_{i=1}^n (\\\\log x_i - \\\\mu) &= 0\\\\\\\\\\n\\\\Rightarrow \\\\mu &= \\\\frac{1}{n}\\\\sum_{i=1}^n \\\\log(x_i).\\n\\\\end{align*}\\n$$\\nThis is the value of $\\\\mu$ that will maximise the likelihood function. To find the associated $\\\\gamma$ value, we solve the second equation:\\n$$\\n\\\\begin{align*}\\n-\\\\frac{n}{2\\\\gamma} + \\\\frac{1}{2\\\\gamma^2}\\\\sum_{i=1}^n\\\\left(\\\\log x_i - \\\\frac{1}{n}\\\\sum_{j=1}^n \\\\log(x_i)\\\\right)^2 &= 0\\\\\\\\\\n\\\\Rightarrow \\\\gamma &= \\\\frac{1}{n}\\\\sum_{i=1}^n\\\\left(\\\\log x_i - \\\\frac{1}{n}\\\\sum_{j=1}^n \\\\log(x_i)\\\\right)^2.\\n\\\\end{align*}\\n$$\\nTherefore, these $\\\\mu$ and $\\\\gamma$ values are the values that maximise the likelihood function.\\n\\n### Part (g):\\n\\nFrom part (f), we know the formulae for the maximum likelihood estimates of $\\\\mu$ and $\\\\gamma$. The data is, $8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1$. 
Note that $n = 9$ and\\n$$ \\\\sum_{i=1}^n \\\\log(x_i) = 21.334 \\\\, \\\\text{ (3 d.p.).}$$\\nTherefore,\\n$$\\\\hat{\\\\mu} = \\\\frac{21.334}{9} = 2.370 \\\\, \\\\text{ (3 d.p.),} $$\\nand,\\n$$\\n\\\\begin{align*}\\n\\\\hat{\\\\gamma} &= \\\\frac{1}{9}\\\\sum_{i=1}^n\\\\left(\\\\log(x_i) - 2.370\\\\right)^2 \\\\\\\\\\n&= 0.352 \\\\, \\\\text{ (3 d.p.).}\\n\\\\end{align*}\\n$$\\n### Part (h):\\n\\nWe require the Fisher information of $\\\\gamma$. Recall that $I(\\\\gamma) = -\\\\text{E}\\\\left[\\\\frac{\\\\partial^2}{\\\\partial \\\\gamma^2} \\\\ell(2,\\\\gamma; \\\\underline{X})\\\\right]$ . The second derivative of the log-likelihood with respect to $\\\\gamma$ is:\\n\\n$$ \\\\frac{\\\\partial^2}{\\\\partial\\\\gamma^2}\\\\ell(\\\\mu,\\\\gamma; \\\\underline{x}) = \\\\frac{n}{2\\\\gamma^2} - \\\\frac{1}{\\\\gamma^3}\\\\sum_{i=1}^n(\\\\log x_i - \\\\mu)^2 $$\\n\\nWe first compute the Fisher information for $\\\\gamma$,\\n$$ \\n\\\\begin{align*}I(\\\\gamma) &= -\\\\frac{n}{2\\\\gamma^2}+\\\\frac{1}{\\\\gamma^3}\\\\sum_{i=1}^n\\\\text{E}\\\\left[\\\\log(X_i)^2 - 2\\\\mu\\\\log(X_i) + \\\\mu^2\\\\right] \\\\\\\\\\n &= -\\\\frac{n}{2\\\\gamma^2} + \\\\frac{1}{\\\\gamma^3}\\\\sum_{i=1}^n \\\\mu^2 + \\\\gamma - 2\\\\mu^2 + \\\\mu^2 \\\\\\\\\\n&= -\\\\frac{n}{2\\\\gamma^2} + \\\\frac{n}{\\\\gamma^2} = \\\\frac{n}{2\\\\gamma^2}.\\n\\\\end{align*}\\n$$\\n### Part (i):\\nWe need to find the maximum-likelihood estimate of $\\\\gamma$ when $\\\\mu=2$. 
This is achieved by solving:\\n\\n$$ \\n\\\\begin{align*}\\n\\\\frac{\\\\partial}{\\\\partial \\\\gamma} \\\\ell(2, \\\\gamma; \\\\underline{x}) = -\\\\frac{n}{2\\\\gamma} + \\\\frac{1}{2\\\\gamma^2}\\\\sum_{i=1}^n(\\\\log x_i - 2)^2 &= 0 \\\\\\\\\\n\\\\Rightarrow \\\\gamma &= \\\\frac{1}{n}\\\\sum_{i=1}^n (\\\\log x_i - 2)^2\\n\\\\end{align*}\\n$$\\n\\nTherefore, the asymptotic distribution is:\\n\\n$$ \\\\hat{\\\\gamma} \\\\sim \\\\mathcal{N}\\\\left(\\\\gamma, v\\\\right), $$\\nwhere\\n$$ \\nv = I(\\\\hat{\\\\gamma})^{-1} = \\\\frac{2\\\\left(\\\\sum_{i=1}^n (\\\\log(x_i) - 2)^2\\\\right)^2}{n^3}\\n$$\\nTherefore, the 95% large sample confidence interval takes the form:\\n\\n$$\\\\frac{1}{n}\\\\sum_{i=1}^n (\\\\log x_i - 2)^2 \\\\pm 1.96\\\\times\\\\frac{2^{0.5}\\\\sum_{i=1}^n(\\\\log x_i - 2)^2}{n^{3/2}}$$\\nUsing the data, $8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1$, we have\\n\\n$$ \\\\sum_{i=1}^n (\\\\log x_i - 2)^2 = 4.407\\\\, \\\\text{ (3 d.p.)}. $$\\nAlso, $n = 9$ and so the 95% large sample confidence interval takes the form $L < \\\\gamma < U$, where\\n\\n$$ L = 0.037\\\\, \\\\text{ (3 d.p.)} \\\\text{ and } U = 0.942 \\\\, \\\\text{ (3 d.p.)}. $$\\n### Part (j):\\n\\nThe admissible set (or parameter space) is $\\\\Theta = \\{(\\\\mu, \\\\gamma) \\\\,|\\\\, \\\\mu \\\\in \\\\mathbb{R}, \\\\gamma \\\\in \\\\mathbb{R}^+\\}$ and the null-hypothesis set is $\\\\Omega = \\{(2,\\\\gamma) \\\\,|\\\\, \\\\gamma\\\\in\\\\mathbb{R}^+\\}$. \\n\\nTherefore, the degrees of freedom of the test-statistic is $d = \\\\text{dim}(\\\\Theta) - \\\\text{dim}(\\\\Omega) = 2 - 1 = 1$.\\n\\nNext, we need to compute the test-statistic $w$. In order to do this, we need to compute\\n\\n$$ \\\\max_{\\\\gamma > 0} \\\\ell(2, \\\\gamma; \\\\underline{x}). $$\\nNote that, from part (i), the maximum likelihood estimate of $\\\\gamma$ when $\\\\mu = 2$ is \\n\\n$$ \\\\hat{\\\\gamma} = \\\\frac{1}{n}\\\\sum_{i=1}^n (\\\\log x_i - 2)^2. 
$$\\nSubstituting this value into the log-likelihood, we obtain\\n\\n$$\\n\\\\begin{align*}\\n \\\\ell(2,\\\\gamma;\\\\underline{x}) &= -\\\\sum_{i=1}^n\\\\log(x_i) - \\\\frac{n}{2}\\\\log(2\\\\pi) -\\\\frac{n}{2}\\\\log(\\\\gamma) - \\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n(\\\\log x_i -2)^2 \\\\\\\\\\n &= -\\\\sum_{i=1}^n\\\\log(x_i) - \\\\frac{n}{2}\\\\log(2\\\\pi) -\\\\frac{n}{2}\\\\log\\\\left(\\\\frac{1}{n}\\\\sum_{i=1}^n (\\\\log x_i - 2)^2\\\\right) - \\\\frac{n}{2} \\\\\\\\\\n &= -30.891 \\\\, \\\\text{ (3 d.p.)}.\\n \\\\end{align*}\\n $$\\nAlternatively, you could use your $\\\\texttt{log\\\\_likelihood}$ function defined in part (b) and plug in $\\\\texttt{c(2, 0.4896)}$. This is because $\\\\hat{\\\\gamma} = 0.4896 \\\\, \\\\text{ (4 d.p.)}$. \\n\\nNext, we compute the value of\\n$$ \\n\\\\max_{\\\\underline{\\\\theta}\\\\in \\\\Theta} \\\\ell(\\\\mu,\\\\gamma;\\\\underline{x}).\\n$$\\nRecall that, from part (f), the value of $\\\\mu$ and $\\\\gamma$ which maximise the log-likelihood are:\\n\\n$$ \\n\\\\begin{align*}\\n\\\\hat{\\\\mu} &= \\\\frac{1}{n}\\\\sum_{i=1}^n \\\\log x_i \\\\\\\\\\n\\\\hat{\\\\gamma} &= \\\\frac{1}{n}\\\\sum_{i=1}^n\\\\left(\\\\log(x_i) - \\\\frac{1}{n}\\\\sum_{j=1}^n \\\\log x_j\\\\right)^2\\n\\\\end{align*}\\n$$\\nSubstituting these values into the log-likelihood, we obtain:\\n$$\\n\\\\begin{align*}\\n \\\\ell(\\\\hat{\\\\mu},\\\\hat{\\\\gamma};\\\\underline{x}) &= -\\\\sum_{i=1}^n\\\\log(x_i) - \\\\frac{n}{2}\\\\log(2\\\\pi) -\\\\frac{n}{2}\\\\log(\\\\gamma) - \\\\frac{1}{2\\\\gamma}\\\\sum_{i=1}^n(\\\\log x_i - \\\\mu)^2 \\\\\\\\\\n &= -\\\\sum_{i=1}^n\\\\log(x_i) - \\\\frac{n}{2}\\\\log(2\\\\pi) -\\\\frac{n}{2}\\\\log\\\\left(\\\\frac{1}{n}\\\\sum_{i=1}^n \\\\left(\\\\log x_i - \\\\frac{1}{n}\\\\sum_{j=1}^n\\\\log x_j\\\\right)^2\\\\right) - \\\\frac{n}{2} \\\\\\\\\\n &= -29.411 \\\\, \\\\text{ (3 d.p.)}.\\n \\\\end{align*}\\n$$\\nAlternatively, you could use your $\\\\texttt{log\\\\_likelihood}$ function defined in part (b) 
and plug in $\\\\texttt{c()}$. This is because $\\\\hat{\\\\mu} = 2$ and $\\\\hat{\\\\gamma} = 3$. \\n\\nTherefore, the value of the test-statistic is\\n\\n$$ w = -2(-30.891 - (-29.411)) = 2.960 \\\\, \\\\text{ (3 d.p.).} $$\\n\\nThe relevant value in $\\\\chi^2_d$ table is\\n$$ \\\\text{Pr}(X > 3.84) = 0.05. $$\\nSince $w = 2.960 < 3.84$, we fail to reject the null-hypothesis.\\n\\n\\n\")", "description": "", "templateType": "long plain string", "can_override": false}, "optimum_code": {"name": "optimum_code", "group": "Ungrouped variables", "definition": "safe(\"initial_choice <- c(-1.3, 1.7) \\n\\noptim(\\n par = initial_choice, \\n fn = log_likelihood, \\n lower = c(-Inf, 0.001), \\n upper = c(Inf, Inf),\\n method=\\'L-BFGS-B\\', \\n control = list(fnscale = -1)\\n)\")", "description": "", "templateType": "long plain string", "can_override": false}, "correct_optimum_code": {"name": "correct_optimum_code", "group": "Ungrouped variables", "definition": "safe(\"initial_choice <- c(-1.3, 1.7) \\n\\nanswer1 <- optim(\\n par = initial_choice, \\n fn = log_likelihood, \\n lower = c(-Inf, 0.001), \\n upper = c(Inf, Inf),\\n method=\\'L-BFGS-B\\', \\n control = list(fnscale = -1)\\n)\")", "description": "", "templateType": "long plain string", "can_override": false}, "optimum_placeholder": {"name": "optimum_placeholder", "group": "Ungrouped variables", "definition": "safe(\"initial_choice <- c(-1.3, 1.7) \\n\\noptim(#code here)\")", "description": "", "templateType": "long plain string", "can_override": false}}, "variablesTest": {"condition": "", "maxRuns": 100}, "ungrouped_variables": ["log_likelihood", "log_placeholder", "correct_log_likelihood", "Advice", "optimum_code", "correct_optimum_code", "optimum_placeholder"], "variable_groups": [], "functions": {}, "preamble": {"js": "", "css": ""}, "parts": [{"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], 
"showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "

Let $Y \\sim \\mathrm{Normal}(\\mu, \\gamma)$, for some $\\mu \\in \\mathbb{R}$ and $\\gamma > 0$. Which of the following is the probability density function for $X := \\exp(Y)$?

\n

Hint: Remember that if $X$ is a univariate real random variable with probability density function (PDF) $f_X$ and $h\\colon \\mathbb{R} \\to \\mathbb{R}$ is continuously differentiable and has an inverse $h^{-1}$, then $Z := h(X)$ has PDF $f_{Z}(z) = f_X(h^{-1}(z)) \\lvert \\frac{\\mathrm{d}}{\\mathrm{d} z} h^{-1}(z) \\rvert$, for $z \\in \\mathbb{R}$.

", "minMarks": 0, "maxMarks": 0, "shuffleChoices": true, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["$ f_X(x) = \\frac{1}{x\\sqrt{2\\gamma\\pi}}\\exp\\left(-\\frac{1}{2\\gamma}(\\log(x) - \\mu)^2\\right).$", "$ f_X(x) = \\frac{1}{\\sqrt{2\\gamma\\pi}}\\exp\\left(-\\frac{1}{2\\gamma}(\\log(x) - \\mu)^2\\right).$", "$ f_X(x) = \\frac{1}{x\\sqrt{2\\gamma\\pi}}\\exp\\left(-\\frac{1}{2\\gamma}(e^x - \\mu)^2\\right).$", "$ f_X(x) = \\frac{\\log(x)}{\\sqrt{2\\gamma\\pi}}\\exp\\left(-\\frac{1}{2\\gamma}(x - \\mu)^2\\right).$", "$ f_X(x) = \\frac{1}{\\sqrt{2\\gamma\\pi}}\\exp\\left(-\\frac{1}{\\gamma}(\\log(x) - \\mu)\\right).$"], "matrix": ["5", 0, 0, 0, 0], "distractors": ["It is this one!", "", "", "", ""]}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "

Let $Y_1, \\dotsc, Y_n \\overset{\\mathrm{iid}}{\\sim} \\mathrm{Normal}(\\mu, \\gamma)$, for some $\\mu \\in \\mathbb{R}$ and $\\gamma > 0$. Assume we have observed a realisation $\\underline{x} := (x_1, \\dotsc, x_n)$ of $\\underline{X} := (X_1, \\dotsc, X_n)$, where $X_i := \\exp(Y_i)$, for $i = 1, \\dotsc, n$.

\n

Which of the following functions is the likelihood function $L(\\underline{\\theta}; \\underline{x})$, where $\\underline{\\theta} = (\\mu, \\gamma)^{\\mathrm{T}}$?

\n

Hint: In the following, $\\bar{x}_{\\mathrm{g}} = (\\prod_{i=1}^n x_i)^{1/n}$ is the geometric mean of $x_1, \\dotsc, x_n$.

\n

", "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["$L(\\underline{\\theta}; {\\underline{x}}) = \\bar{x}_{\\mathrm{g}}^{-n} (2\\gamma \\pi)^{-n/2} \\exp\\left\\{-\\frac{1}{2\\gamma}\\sum_{i=1}^n\\left(\\log x_i - \\mu\\right)^2\\right\\}$", "$L(\\underline{\\theta}; \\underline{x}) = (2\\gamma \\pi)^{-n/2} \\exp\\left\\{-\\frac{1}{2\\gamma}\\sum_{i=1}^n\\left(\\log x_i - \\mu\\right)^2\\right\\}$", "$L(\\underline{\\theta}; \\underline{x}) = (2\\gamma \\pi)^{-n/2} \\exp\\left\\{-\\frac{1}{2\\gamma}\\sum_{i=1}^n\\left(e^{x_i} - \\mu\\right)^2\\right\\}$", "$L(\\underline{\\theta}; \\underline{x}) = \\bar{x}_{\\mathrm{g}}^{n} (\\gamma \\pi)^{-n/2} \\exp\\left\\{-\\frac{1}{2\\gamma}\\sum_{i=1}^n\\left(\\log x_i - \\mu\\right)^2\\right\\}$", "$L(\\underline{\\theta}; \\underline{x}) = \\bar{x}_{\\mathrm{g}} (\\gamma \\pi)^{-n} \\exp\\left\\{-\\frac{1}{2\\gamma}\\sum_{i=1}^n\\left(\\log x_i - \\mu\\right)^2\\right\\}$"], "matrix": ["5", 0, 0, 0, 0], "distractors": ["It is this one!", "", "", "", ""]}, {"type": "mark-code-3", "useCustomName": false, "customName": "", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "
\n
In parts (c) and (d), you are asked to numerically compute the maximum likelihood estimate of $(\\mu,\\gamma)^{\\mathrm{T}}$ using $\\texttt{R}$ with provided data. The first step is to write an $\\texttt{R}$ function that computes the log-likelihood.
\n
\n
\n
Using the following data $8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1$, write an $\\texttt{R}$ function that computes the log-likelihood from the previous part.
\n
", "settings": {"show_input_hint": true, "code_language": "webr", "correct_answer": "{log_likelihood}", "correct_answer_subvars": true, "show_stdout": true, "show_stderr": true, "show_marking_errors": false, "placeholder": "{log_placeholder}", "modifier": "", "preamble": "", "postamble": "{correct_log_likelihood}\n\ncorrect_answer = correct_log_likelihood(c(1,1))\n\nstudent_answer = log_likelihood(c(1,1))\n", "postamble_feedback_whitespace": false, "tests": "[\n [\"Correct Value\", 2, \"all.equal(correct_answer, student_answer, tolerance = 0.001)\"]\n]", "validation_tests": "[\n]", "variables": "dict()"}}, {"type": "mark-code-3", "useCustomName": false, "customName": "", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "

Write an $\\texttt{R}$ command that computes the maximum likelihood estimate of $\\underline{\\theta} = (\\mu,\\gamma)^{\\mathrm{T}}$ numerically. Your initial guess of the optimal $\\underline{\\theta}$ value should be $(-1.3, 1.7)$.

\n

Hint: the function $\\texttt{optim}$ will be useful.

", "settings": {"show_input_hint": true, "code_language": "webr", "correct_answer": "{optimum_code}", "correct_answer_subvars": true, "show_stdout": true, "show_stderr": true, "show_marking_errors": false, "placeholder": "{optimum_placeholder}", "modifier": "", "preamble": "{log_likelihood}", "postamble": "answer2 = .Last.value\n\n{correct_optimum_code}", "postamble_feedback_whitespace": false, "tests": "[\n [\"Correct Value\", 2, \"(answer1$par[1] == answer2$par[1]) & (answer1$par[2] == answer2$par[2])\"]\n]", "validation_tests": "[\n]", "variables": "dict()"}}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "

In parts (e), (f) and (g), you will analytically compute the maximum likelihood estimate for $\\underline{\\theta} = (\\mu, \\gamma)^\\mathrm{T}$ and compare it to the value generated by your code in the previous parts.

\n

\n

Using your likelihood function from part (b), and writing $\\ell(\\underline{\\theta}; \\underline{x}) = \\ell(\\mu, \\gamma; \\underline{x})$, which of the following are the correct values of $\\frac{\\partial}{\\partial \\mu} \\ell(\\mu, \\gamma; \\underline{{x}})$ and $\\frac{\\partial}{\\partial \\gamma} \\ell(\\mu, \\gamma; \\underline{x})$?

", "minMarks": 0, "maxMarks": 0, "shuffleChoices": true, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["\\begin{align}
\\frac{\\partial}{\\partial \\mu} \\ell(\\mu, \\gamma; \\underline{x}) &= \\frac{1}{\\gamma}\\sum_{i=1}^n(\\log x_i - \\mu), \\\\
\\frac{\\partial}{\\partial \\gamma} \\ell(\\mu, \\gamma; \\underline{x}) &= -\\frac{n}{2\\gamma} + \\frac{1}{2\\gamma^2}\\sum_{i=1}^n(\\log x_i - \\mu)^2.
\\end{align}", "\\begin{align}
\\frac{\\partial}{\\partial \\mu} \\ell(\\mu, \\gamma; \\underline{x}) &= \\frac{1}{\\gamma}\\sum_{i=1}^n( e^{x_i} - \\mu), \\\\
\\frac{\\partial}{\\partial \\gamma} \\ell(\\mu, \\gamma; \\underline{x}) &= -\\frac{n}{2\\gamma} + \\frac{1}{2\\gamma^2}\\sum_{i=1}^n(e^{x_i} - \\mu)^2.
\\end{align}", "\\begin{align}
\\frac{\\partial}{\\partial \\mu} \\ell(\\mu, \\gamma; \\underline{x}) &= \\frac{1}{2\\gamma}\\sum_{i=1}^n(\\log x_i - \\mu), \\\\
\\frac{\\partial}{\\partial \\gamma} \\ell(\\mu, \\gamma; \\underline{x}) &= -\\frac{n}{\\gamma} + \\frac{1}{\\gamma^2}\\sum_{i=1}^n(\\log x_i - \\mu)^2.
\\end{align}", "\\begin{align}
\\frac{\\partial}{\\partial \\mu} \\ell(\\mu, \\gamma; \\underline{x}) &= \\frac{1}{2\\gamma}\\sum_{i=1}^n(x_i - \\mu), \\\\
\\frac{\\partial}{\\partial \\gamma} \\ell(\\mu, \\gamma; \\underline{x}) &= -\\frac{n}{\\gamma} + \\frac{1}{\\gamma^2}\\sum_{i=1}^n(x_i - \\mu)^2.
\\end{align}", "\\begin{align}
\\frac{\\partial}{\\partial \\mu} \\ell(\\mu, \\gamma; \\underline{x}) &= \\mu + \\frac{1}{2\\gamma}\\sum_{i=1}^n\\log(x_i), \\\\
\\frac{\\partial}{\\partial \\gamma} \\ell(\\mu, \\gamma; \\underline{x}) &= -\\frac{n}{\\gamma} + \\frac{1}{\\gamma^2}\\sum_{i=1}^n(\\log(x_i) - \\mu)^2.
\\end{align}"], "matrix": ["5", 0, 0, 0, 0], "distractors": ["It is this one!", "", "", "", ""]}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "

Using your partial derivatives from part (e), compute formulae for $\\mu$ and $\\gamma$ such that

\n

$\\frac{\\partial}{\\partial \\mu} \\ell(\\mu, \\gamma; \\underline{{x}}) = 0, $

\n

$\\frac{\\partial}{\\partial \\gamma} \\ell(\\mu, \\gamma; \\underline{{x}}) = 0. $

\n

Which of the following are the correct formulae for $\\mu$ and $\\gamma$?

", "minMarks": 0, "maxMarks": 0, "shuffleChoices": true, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["$\\mu = \\frac{1}{n}\\sum_{i=1}^n \\log x_i$
$\\gamma = \\frac{1}{n}\\sum_{i=1}^n\\left(\\log x_i - \\frac{1}{n}\\sum_{j=1}^n \\log x_j\\right)^2$", "$\\mu = \\frac{1}{n}\\sum_{i=1}^n \\log x_i$
$\\gamma = \\frac{1}{n}\\sum_{i=1}^n\\left(\\log x_i - \\mu \\right)^2$", "$\\mu = \\frac{1}{n}\\sum_{i=1}^n e^{x_i}$
$\\gamma = \\frac{1}{n}\\sum_{i=1}^n\\left(e^{x_i} - \\frac{1}{n}\\sum_{j=1}^n e^{x_j}\\right)^2$", "$\\mu = \\frac{1}{n}\\sum_{i=1}^n x_i$
$\\gamma = \\frac{1}{n}\\sum_{i=1}^n\\left( x_i - \\frac{1}{n}\\sum_{j=1}^n x_j\\right)^2$", "$\\mu = \\frac{1}{n}\\sum_{i=1}^n \\log x_i$
$\\gamma = \\frac{1}{2n}\\sum_{i=1}^n\\left(\\log x_i - \\frac{1}{n}\\sum_{j=1}^n \\log x_j\\right)^2$"], "matrix": ["5", 0, 0, 0, 0], "distractors": ["It is this one!", "", "", "", ""]}, {"type": "gapfill", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "

Using the data $8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1$ from part (c), compute the maximum likelihood estimate of $\\mu$ and $\\gamma$ using your formulae from part (f).

\n

$\\mu = $ [[0]]

\n

$\\gamma = $ [[1]]

", "gaps": [{"type": "numberentry", "useCustomName": true, "customName": "mu", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minValue": "2.37047168681101", "maxValue": "2.37047168681101", "correctAnswerFraction": false, "allowFractions": false, "mustBeReduced": false, "mustBeReducedPC": 0, "displayAnswer": "2", "precisionType": "dp", "precision": "2", "precisionPartialCredit": "50", "precisionMessage": "You have not given your answer to the correct precision.", "strictPrecision": false, "showPrecisionHint": true, "notationStyles": ["plain", "en", "si-en"], "correctAnswerStyle": "plain"}, {"type": "numberentry", "useCustomName": true, "customName": "gamma", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minValue": "0.352389294367405", "maxValue": "0.352389294367405", "correctAnswerFraction": false, "allowFractions": false, "mustBeReduced": false, "mustBeReducedPC": 0, "displayAnswer": "2", "precisionType": "dp", "precision": "2", "precisionPartialCredit": "50", "precisionMessage": "You have not given your answer to the correct precision.", "strictPrecision": false, "showPrecisionHint": true, "notationStyles": ["plain", "en", "si-en"], "correctAnswerStyle": "plain"}], "sortAnswers": false}, {"type": "1_n_2", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, 
"showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "

In this part, you will compute a large sample confidence interval for $\\gamma$ by fixing $\\mu = 2$.

\n

Setting $\\mu=2$, which of the following functions is the Fisher information for $\\gamma$?

\n

Hint: Note that $\\mathbb{E}[\\log(X_i)] = \\mu$ and $\\mathbb{E}[\\log(X_i)^2] = \\gamma + \\mu^2$ for each $i$.

", "minMarks": 0, "maxMarks": 0, "shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["$\\mathcal{I}(\\gamma) = \\frac{n}{2\\gamma^2}$", "$\\mathcal{I}(\\gamma) = \\frac{n}{\\gamma^2}$", "$\\mathcal{I}(\\gamma) = \\frac{n}{2\\gamma^3}$", "$\\mathcal{I}(\\gamma) = \\frac{n}{\\gamma} + 2n$", "$\\mathcal{I}(\\gamma) = \\frac{2n}{\\gamma}$"], "matrix": ["5", 0, 0, 0, 0], "distractors": ["It is this one!", "", "", "", ""]}, {"type": "gapfill", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "

Using the data from part (c), $8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1$, compute a 95% large sample confidence interval for $\\gamma$. 

\n

The interval takes the form $[l, u]$, where
$l = $ [[0]]

\n

and

\n

$u = $ [[1]].

\n

Hint: Remember to set $\\mu = 2$ when computing the maximum-likelihood estimate of $\\gamma$.

", "gaps": [{"type": "numberentry", "useCustomName": true, "customName": "lower", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minValue": "0.03723561214181875", "maxValue": "0.03723561214181875", "correctAnswerFraction": false, "allowFractions": false, "mustBeReduced": false, "mustBeReducedPC": 0, "displayAnswer": "", "precisionType": "dp", "precision": "3", "precisionPartialCredit": "50", "precisionMessage": "You have not given your answer to the correct precision.", "strictPrecision": false, "showPrecisionHint": true, "notationStyles": ["plain", "en", "si-en"], "correctAnswerStyle": "plain"}, {"type": "numberentry", "useCustomName": true, "customName": "upper", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minValue": "0.942041518050176", "maxValue": "0.942041518050176", "correctAnswerFraction": false, "allowFractions": false, "mustBeReduced": false, "mustBeReducedPC": 0, "displayAnswer": "", "precisionType": "dp", "precision": "3", "precisionPartialCredit": "50", "precisionMessage": "You have not given your answer to the correct precision.", "strictPrecision": false, "showPrecisionHint": true, "notationStyles": ["plain", "en", "si-en"], "correctAnswerStyle": "plain"}], "sortAnswers": false}, {"type": "gapfill", "useCustomName": false, "customName": "", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": 
true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "prompt": "

In this final part, you will perform a likelihood ratio test on the parameters $\\underline{\\theta} := (\\mu, \\gamma)^\\mathrm{T}$.

\n

The null hypothesis is $H_0: \\mu = 2$.

\n

What are the degrees of freedom $\\nu$ of the $\\chi^2_\\nu$ test statistic?

\n

$\\nu = $[[0]].

\n

Using the data provided in part (c), $8.6, 11.5, 7.9, 9.5, 4.5, 28.4, 22.4, 17.0, 5.1$, compute the value of the test-statistic.

\n

$\\lambda_{\\mathrm{LR}} = -2\\log\\left(\\frac{\\max_{\\underline{\\theta} \\in  \\Theta_0} L(\\underline{\\theta};\\underline{x})}{\\max_{\\underline{\\theta} \\in \\Theta} L(\\underline{\\theta};\\underline{x})}\\right) =$ [[1]].

\n

Perform a test at the $\\alpha = 0.05$ significance level. Do you reject $H_0$?

\n

[[2]]

\n

\n

\n

", "gaps": [{"type": "numberentry", "useCustomName": true, "customName": "df", "marks": "2", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minValue": "1", "maxValue": "1", "correctAnswerFraction": false, "allowFractions": false, "mustBeReduced": false, "mustBeReducedPC": 0, "displayAnswer": "", "showFractionHint": true, "notationStyles": ["plain", "en", "si-en"], "correctAnswerStyle": "plain"}, {"type": "numberentry", "useCustomName": true, "customName": "test_statistic", "marks": "5", "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minValue": "2.96037884052939", "maxValue": "2.96037884052939", "correctAnswerFraction": false, "allowFractions": false, "mustBeReduced": false, "mustBeReducedPC": 0, "displayAnswer": "", "precisionType": "dp", "precision": "3", "precisionPartialCredit": "50", "precisionMessage": "You have not given your answer to the correct precision.", "strictPrecision": true, "showPrecisionHint": true, "notationStyles": ["plain", "en", "si-en"], "correctAnswerStyle": "plain"}, {"type": "1_n_2", "useCustomName": true, "customName": "test", "marks": 0, "scripts": {}, "customMarkingAlgorithm": "", "extendBaseMarkingAlgorithm": true, "unitTests": [], "showCorrectAnswer": true, "showFeedbackIcon": true, "variableReplacements": [], "variableReplacementStrategy": "originalfirst", "nextParts": [], "suggestGoingBack": false, "adaptiveMarkingPenalty": 0, "exploreObjective": null, "minMarks": 0, "maxMarks": 0, 
"shuffleChoices": false, "displayType": "radiogroup", "displayColumns": 0, "showBlankOption": true, "showCellAnswerState": true, "choices": ["No", "Yes"], "matrix": ["4", 0], "distractors": ["", ""]}], "sortAnswers": false}], "partsMode": "all", "maxMarks": 0, "objectives": [], "penalties": [], "objectiveVisibility": "always", "penaltyVisibility": "always", "contributors": [{"name": "Axel Finke", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/31812/"}, {"name": "Christian Lawson-Perfect", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/7/"}, {"name": "Matthew Fisher", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/19545/"}], "resources": ["question-resources/chi2table.png"]}]}], "contributors": [{"name": "Axel Finke", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/31812/"}, {"name": "Christian Lawson-Perfect", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/7/"}, {"name": "Matthew Fisher", "profile_url": "https://numbas.mathcentre.ac.uk/accounts/profile/19545/"}]}