AdaptiveMCProbErrAnalf.aux
\relax
\immediate\closeout\minitoc
\let \MiniTOC =N
\@writefile{toc}{\contentsline {title}{Automatic Monte Carlo Algorithms Where the Integrand Size Is Unknown \unskip {}}{1}}
\@writefile{toc}{\contentsline {author}{Fred J. Hickernell\unskip {} \and Lan Jiang\unskip {} \and Yuewei Liu\unskip {} \and Art Owen \unskip {}}{1}}
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}}
\newlabel{samplemean}{{1}{1}}
\citation{BahSav56}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.1}The Statistician's Perspective}{2}}
\newlabel{statperspsec}{{1.1}{2}}
\newlabel{abserr}{{2}{2}}
\newlabel{CLT}{{3}{2}}
\newlabel{samplevar}{{4}{2}}
\newlabel{simpleMCest}{{5}{2}}
\newlabel{boundedkurt}{{6}{2}}
\citation{TraWasWoz88}
\citation{MAT7.12}
\citation{GanGau00a}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.2}The Information-Based Complexity Theorist's or Numerical Analyst's Perspective}{3}}
\newlabel{Lpnormdef}{{7}{3}}
\newlabel{ballintegdef}{{8}{3}}
\newlabel{coneintegdef}{{9}{3}}
\citation{MAT7.12}
\citation{Sha08a}
\citation{TrefEtal12}
\citation{MAT7.12}
\@writefile{toc}{\contentsline {subsection}{\numberline {1.3}Illustrative Univariate Examples of Automatic Algorithms}{4}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Plots of fooling functions, $f$, with $\mu =\int _0^1 f(x)\,{\rm d}x=1$, but for which the corresponding algorithms return values of $\hat {\mu }=0$. }}{4}}
\newlabel{foolfunfig}{{1}{4}}
\newlabel{GaussianTestFun}{{10}{4}}
\newlabel{GaussianTestFun}{{1.3}{5}}
\newlabel{FunPara}{{1.3}{5}}
\newlabel{AlgorPara}{{1.3}{5}}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Execution times and errors for test function \textup {(1.3)} for $d=1$ and $\varepsilon =10^{-3}$, and a variety of parameters giving a range of $\sigma (f)$ and $\kappa (f)$. The solid line shows the cumulative distribution of actual errors, and the dot-dashed line shows the cumulative distribution of execution times. For the {\tt cubMC} i.i.d.\ and i.i.d.\ heavy duty cases the points labeled * are those for which Theorem 1 guarantees the error tolerance. }}{6}}
\newlabel{GaussianTestFunFig}{{2}{6}}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Execution times and errors for test function \textup {(1.3)} for $d=1$ and $\varepsilon =10^{-3}$, and a variety of parameters giving a range of $\sigma (f)$ and $\kappa (f)$. The solid line shows the cumulative distribution of actual errors, and the dot-dashed line shows the cumulative distribution of execution times. For the {\tt cubMC} i.i.d.\ and i.i.d.\ heavy duty cases the points labeled * are those for which Theorem 1 guarantees the error tolerance. }}{7}}
\newlabel{GaussianTestFunFig}{{3}{7}}
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Execution times and errors for test function \textup {(1.3)} for $d=1$ and $\varepsilon =10^{-3}$, and a variety of parameters giving a range of $\sigma (f)$ and $\kappa (f)$. The solid line shows the cumulative distribution of actual errors, and the dot-dashed line shows the cumulative distribution of execution times. For the {\tt cubMC} i.i.d.\ and i.i.d.\ heavy duty cases the points labeled * are those for which Theorem 1 guarantees the error tolerance. }}{8}}
\newlabel{GaussTestFunFig}{{4}{8}}
\citation{LinBai10a}
\@writefile{toc}{\contentsline {section}{\numberline {2}Simple Monte Carlo with Guaranteed Error Estimation}{9}}
\@writefile{toc}{\contentsline {section}{\numberline {3}Adaptive Monte Carlo with Guaranteed Error Estimation}{9}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Reliably Bounding the Variance}{9}}
\newlabel{ChebCantlem}{{1}{9}}
\newlabel{LP}{{1}{9}}
\citation{Mil86}
\newlabel{varbdlem}{{2}{10}}
\newlabel{sampvarbd}{{12}{10}}
\newlabel{sampvarup}{{12a}{10}}
\newlabel{sampvarlo}{{12b}{10}}
\newlabel{kappamaxdef}{{13}{10}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}Determining the Sample Size}{10}}
\citation{Pet95a}
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces (a) The maximum kurtosis, $\kappa _{\max }(\alpha ,n_{\sigma },1.5)$, as defined in \textup {(13)}; (b) comparison of sample sizes $N_G(0.01,\alpha )$, $N_C(0.01,\alpha )$, and $N_B(0.01,\alpha ,\kappa _{\max }^{3/4}(\alpha ,1000,1.5))$.}}{11}}
\newlabel{kurtmaxcompareNfig}{{5}{11}}
\newlabel{ChebErr}{{14}{11}}
\newlabel{ChebProbEst}{{14a}{11}}
\newlabel{NCdef}{{14b}{11}}
\newlabel{BE}{{3}{11}}
\newlabel{BEresult}{{15}{12}}
\newlabel{proberrcritsampleBE}{{16}{12}}
\newlabel{NB}{{16b}{12}}
\newlabel{mainadaptthm}{{1}{12}}
\newlabel{NCBdef}{{17}{12}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Cost of the Algorithm}{13}}
\citation{Owe95,Owe96,Owe97,Mat98,HonHic00a,DicPil10a}
\citation{Owe95,Owe96,Owe97,DicPil10a}
\citation{Hal05a,Owe06a}
\newlabel{costtheorem}{{2}{14}}
\@writefile{toc}{\contentsline {section}{\numberline {4}More Numerical Examples}{14}}
\@writefile{toc}{\contentsline {section}{\numberline {5}Numerical Examples with ${\tt cubMC}\xspace $}{14}}
\newlabel{numerexsec}{{5}{14}}
\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces Execution times and errors for test function \textup {(1.3)} for $d=7$ and $\varepsilon =10^{-3}$, and a variety of parameters giving a range of $\sigma (f)$ and $\kappa (f)$. The solid line shows the cumulative distribution of actual errors, and the dot-dashed line shows the cumulative distribution of execution times. For the {\tt cubMC} i.i.d.\ and i.i.d.\ heavy duty cases the points labeled * are those for which Theorem 1 guarantees the error tolerance.}}{15}}
\newlabel{GausstianTestFunHDFig}{{6}{15}}
\newlabel{qsedef}{{20}{15}}
\@writefile{toc}{\contentsline {section}{\numberline {6}A General Error Criterion}{16}}
\newlabel{relerrsec}{{6}{16}}
\newlabel{relerrcrit}{{6}{16}}
\newlabel{genrelerrcrit}{{21}{16}}
\newlabel{relerrcritd}{{22}{16}}
\newlabel{NCinv}{{6}{17}}
\newlabel{NBinv*}{{6}{17}}
\newlabel{NCBinv*}{{6}{17}}
\newlabel{alphaseq}{{23}{17}}
\newlabel{alphaseqex}{{24}{17}}
\newlabel{relerradaptthm}{{3}{17}}
\newlabel{boundcstep}{{2}{18}}
\newlabel{newhvarepsstep}{{2c}{18}}
\newlabel{hmufinalstep}{{3}{18}}
\bibstyle{spbasic}
\bibdata{FJH22,FJHown22}
\bibcite{BahSav56}{{1}{1956}{{Bahadur and Savage}}{{}}}
\bibcite{DicPil10a}{{2}{2010}{{Dick and Pillichshammer}}{{}}}
\bibcite{GanGau00a}{{3}{2000}{{Gander and Gautschi}}{{}}}
\bibcite{TrefEtal12}{{4}{2012}{{Hale et~al}}{{Hale, Trefethen, and Driscoll}}}
\bibcite{HonHic00a}{{5}{2003}{{Hong and Hickernell}}{{}}}
\bibcite{LinBai10a}{{6}{2010}{{Lin and Bai}}{{}}}
\bibcite{Mat98}{{7}{1998}{{Matou\v {s}ek}}{{}}}
\bibcite{Mil86}{{8}{1986}{{Miller}}{{}}}
\bibcite{Owe95}{{9}{1995}{{Owen}}{{}}}
\bibcite{Owe96}{{10}{1997{a}}{{Owen}}{{}}}
\bibcite{Owe97}{{11}{1997{b}}{{Owen}}{{}}}
\bibcite{Pet95a}{{12}{1995}{{Petrov}}{{}}}
\bibcite{Sha08a}{{13}{2008}{{Shampine}}{{}}}
\bibcite{MAT7.12}{{14}{2012}{{The MathWorks, Inc.}}{{}}}
\bibcite{TraWasWoz88}{{15}{1988}{{Traub et~al}}{{Traub, Wasilkowski, and Wo\'zniakowski}}}
\immediate\closeout\minitoc
\@writefile{toc}{\contentsline {section}{\numberline {7}Discussion}{19}}
\@writefile{toc}{\contentsline {section}{References}{19}}
\@mtwritefile{\contentsline {mtchap}{References}{19}}