Example 3

This example shows that maximizing the likelihood (Appendix F) and the WTLS algorithm (Appendices C and D) lead to approximately the same maximum-likelihood (ML) solution.
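
For orientation: judging from the coefficient names and the data columns shown below, the relation being estimated is (a sketch; it is not stated explicitly in this example)

    ya ≈ b₀ + b₁·ersstv5a + b₂·HadEN4a,

with the measurement uncertainties of all three series taken into account; see the appendices cited above for the details of the error models.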

using CSV, DataFrames, OptimizationOptimJL, Distributions
import ProfileLikelihood as pl
import Measurements as Mm
using Measurements: ±
using MeasurementErrorModels

Munit = Mm.Measurement{Float64}
f1 = joinpath(dirname(pathof(MeasurementErrorModels)), "../docs/src/assets/")
D2 = CSV.read(f1*"Palaui.csv", DataFrame, types=Dict([2,3,4].=>Munit))  # columns 2–4 are read as value ± uncertainty
first(D2, 3)
3×4 DataFrame
 Row │ t        ersstv5a    HadEN4a     ya
     │ Float64  Measurem…   Measurem…   Measurem…
─────┼──────────────────────────────────────────────
   1 │ 1950.12  28.43±0.32  33.94±0.17  -5.601±0.051
   2 │ 1950.21  28.63±0.27  33.92±0.17  -5.618±0.051
   3 │ 1950.29  29.2±0.29   33.9±0.17   -5.842±0.051
# Turn the profile-likelihood result for parameter i into a Measurement (estimate ± ≈1σ).
function getci(prof, i)
    ml = pl.get_likelihood_solution(prof)[i]
    ci = pl.get_confidence_intervals(prof[i])
    twoσ = maximum(abs, ml .- [ci.lower, ci.upper])  # larger half-width of the ≈95% (≈2σ) interval
    return ml ± 0.5*twoσ                             # halve to report a ≈1σ uncertainty
end

B0 = [-10, -0.22, 0.27]   # starting values for b₀, b₁, b₂
data = dataf(D2[:,2:end], A="hannart")
prob = pl.LikelihoodProblem(hloglik, B0, data=data, syms=[:b₀, :b₁, :b₂])
ml1 = pl.mle(prob, NelderMead())
LikelihoodSolution. retcode: Success
Maximum likelihood: -788.2442749252225
Maximum likelihood estimates: 3-element Vector{Float64}
     b₀: -24.638819143808327
     b₁: -0.1686866990740737
     b₂: 0.6961010293603915
lb, ub = [-50.0, -0.5, 0.0], [0.0, 0.0, 1.0]   # profiling bounds for b₀, b₁, b₂
param_ranges = pl.construct_profile_ranges(ml1, lb, ub, [200, 200, 200])
prof = pl.profile(prob, ml1; param_ranges=param_ranges, parallel=true)
Bhml = [getci(prof, i) for i in [1 2 3]]       # [1 2 3] gives a 1×3 row, ready to stack with Bwtls below
Bml = wtls(D2[:,2:end])
Bwtls = permutedims(mean(Bml) .± sqrt.(var(Bml)))   # WTLS estimates ± their standard deviations, as a 1×3 row
Db = DataFrame([Bhml; Bwtls], [:b₀, :b₁, :b₂])
insertcols!(Db, 1, :Method=>["MLE","WTLS"])
2×4 DataFrame
 Row │ Method  b₀         b₁            b₂
     │ String  Measurem…  Measurem…     Measurem…
─────┼──────────────────────────────────────────────
   1 │ MLE     -24.6±1.2  -0.169±0.014  0.696±0.027
   2 │ WTLS    -24.6±1.8  -0.169±0.014  0.696±0.046
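
As a quick check of the agreement (a minimal sketch, assuming the Db table above; Mm.value and Mm.uncertainty are the Measurements.jl accessors for the nominal value and the 1σ uncertainty):

mle_row  = collect(Db[1, 2:end])    # b₀, b₁, b₂ from the profile likelihood
wtls_row = collect(Db[2, 2:end])    # b₀, b₁, b₂ from WTLS
Δ = abs.(Mm.value.(mle_row) .- Mm.value.(wtls_row))
σ = max.(Mm.uncertainty.(mle_row), Mm.uncertainty.(wtls_row))
all(Δ .< σ)    # true when the point estimates differ by less than the larger uncertainty

Since the two rows of Db agree to the displayed precision, this comparison is expected to return true.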