\documentclass[oupdraft]{bio} \usepackage[colorlinks=true, urlcolor=citecolor, linkcolor=citecolor, citecolor=citecolor]{hyperref} %%%%%%%%%%%%%%%% individual %%%%%%%%%%%%%%%%%% \usepackage{graphicx} \usepackage{amssymb} \usepackage{amsthm} \usepackage{amsmath} \usepackage{amsfonts} \usepackage{lscape} \sloppy \newcommand{\comment}[1]{} \newenvironment{llist}{\begin{list}{}{\setlength{\labelwidth}{0in} \setlength{\leftmargin}{0.03in}\addtolength\rightmargin{\labelsep} \setlength{\topsep}{0pt} \itemsep 5pt plus 2pt minus 2pt \parsep 0pt plus 2pt minus 2pt}}{\end{list}} \def\n{\noindent } \def\sm{\smallskip} \def\m{\medbreak} \def\b{\bigbreak} \newtheorem{prop}{Proposition} \newtheorem{corollary}{Corollary} \def\pf{\n{\bf Proof\ \ }} \def\remark{\n{\bf Remark\ \ }} \newcommand{\appeqn}{ \renewcommand{\theequation}{A.\arabic{equation}}} \newcommand{\openr}{\hbox{$\mathbb{R}$}} \newcommand{\imathbf}[1]{\mbox{\boldmath $#1$}} \newcommand{\smathbf}[1]{\mbox{\scriptsize\boldmath $#1$}} \newcommand{\itheta}{\imathbf{\theta}} \newcommand{\stheta}{\smathbf{\theta}} \newcommand{\ilambda}{\imathbf{\lambda}} \newcommand{\ifb}{\imathbf{f}} \newcommand{\ieta}{\imathbf{\eta}} \newcommand{\ipsi}{\imathbf{\psi}} \newcommand{\ialpha}{\imathbf{\alpha}} \newcommand{\ibeta}{\imathbf{\beta}} \newcommand{\idelta}{\imathbf{\delta}} \newcommand{\sdelta}{\smathbf{\delta}} \newcommand{\ig}{\imathbf{g}} \newcommand{\iM}{\imathbf{M}} \newcommand{\iU}{\imathbf{U}} \newcommand{\iZ}{\imathbf{Z}} % MACROS: % Box at the end of a proof: \font\tensmc=cmcsc10 \def\smc{\tensmc} \outer\def\cp#1{\medbreak\noindent\smc\ignorespaces #1\unskip.\enspace\rm\ignorespaces} \outer\def\endcp{\par\ifdim\lastskip<\medskipamount \removelastskip \penalty 55 \fi\medskip\rm} \outer\def\pro#1{\medbreak\noindent\smc\ignorespaces #1\unskip.\enspace\sl\ignorespaces} \outer\def\endpro{\par\ifdim\lastskip<\medskipamount \removelastskip \penalty 55 \fi\medskip\rm} \def\sqr#1#2{{\quad\vbox{\hrule height.#2pt \hbox{\vrule 
width.#2pt height#1pt \kern#1pt \vrule width.#2pt} \hrule height.#2pt}}} \def\qed{{\sqr53}} % Convergence: \def\gop{{\buildrel P \over \longrightarrow}} \def\god{{\buildrel {\cal D} \over \longrightarrow}} \def\goas{{\buildrel {\rm a.s.} \over \longrightarrow}} \def\as{as $n \to \infty $} % Uppercase Roman numerals: \def\ur#1{\uppercase\expandafter{\romannumeral#1}} % Variance: \def\Var{{\rm Var}} \def\eas {{ \buildrel { \rm a.s. }\over = } } \def\cd{\buildrel d \over =} \def\equalindist{\buildrel d \over =} \def\equalinprob{\buildrel p \over =} \makeatletter \def\singlespace{\def\baselinestretch{1}\@normalsize} \def\endsinglespace{} \renewcommand{\section}{{\setcounter{equation}{0}}{\thesection}} \renewcommand{\thesection}{S} \newtheorem{lemmaS}{Lemma} \renewcommand{\thefootnote}{\fnsymbol{footnote}} \newcommand{\ts}{\mbox{$\tilde{S}$}} \newcommand{\tc}{\mbox{$\tilde{c}$}} \newcommand{\inte} {\int_{0}^{+\infty}} \newcommand{\bias}{\mbox{bias}} \newcommand{\var}{\mbox{var}} \newcommand{\Beta}{\mbox{Beta}} \newcommand{\MISE}{\mbox{MISE}} \newcommand{\MASE}{\mbox{MASE}} \newcommand{\MSE}{\mbox{MSE}} \newcommand{\by}{\mbox{\bf y}} \newcommand{\bX}{\mbox{\bf X}} \newcommand{\bY}{\mbox{\bf Y}} \newcommand{\bW}{\mbox{\bf W}} \newcommand{\br}{\mbox{\bf r}} \newcommand{\bm}{\mbox{\bf m}} \newcommand{\diag}{\mbox{diag}} %%%%%%%%%%%%%%% end of individual %%%%%%%%%%%%%%%%%%%%% \begin{document} % Title of paper \title{Mark-Specific Proportional Hazards Model with Multivariate Continuous Marks and Its Application to HIV Vaccine Efficacy Trials} \author{Yanqing Sun\\ Department of Mathematics and Statistics\\ University of North Carolina at Charlotte, Charlotte, NC 28223 \\ Mei Li$^\ast$\\ School of Public Health\\ Zhejiang University, Hangzhou, China\\ Peter B.\ Gilbert\\ Department of Biostatistics, University of Washington\\ and Fred Hutchinson Cancer Research Center, Seattle, WA 98109} % Running headers of paper: \markboth% % First field is the short list of authors {Y. 
Sun, M. Li and P. B. Gilbert} % Second field is the short title of the paper {Proportional hazards model with multivariate continuous marks} \maketitle % Add a footnote for the corresponding author if one has been % identified in the author list \footnotetext{To whom correspondence should be addressed.} %\date{} \vspace{.55cm} \centerline{\textsc{Supplementary Material}} \vspace{.55cm} %\section*{Supplementary Material} %CONTENT OF A BRIEF NOTE.........\\ %CONTENT OF THE BRIEF NOTE.\\ %CONTENT OF THE BRIEF NOTE.\\ This Supplementary Material presents the proofs for Theorems 2.1, 2.2, 2.3 and 3.1 of the paper. Please refer to the main paper for notations not defined here. \par \setcounter{section}{0} \setcounter{equation}{0} \def\theequation{S.\arabic{equation}} \def\thesection{S\arabic{section}} \renewcommand{\thetable}{S.\arabic{table}} \renewcommand{\thefigure}{S.\arabic{figure}} \bigskip \begin{lemmaS} \label{converge}Under conditions (A.1)--(A.3), $S_k^{(j)}(t,v,\bar\beta)$ converges to $s_k^{(j)}(t,v,\bar \beta)$ in probability uniformly in $(t,v,\bar\beta)\in [0,\tau]\times [0,1]^2\times {\cal B}$ as $n \to \infty$, for $j=0,1,2$ and $1 \leq k \leq K$. \end{lemmaS} \noindent \textsc{Proof of Lemma \ref{converge}.} Let $S_{ki}^{(j)}(t,v,\bar \beta)=Y_{ki}(t)\exp\{\bar{\beta}^T\tilde Z_{ki}(t,v)\}\tilde Z_{ki}(t,v)^{\otimes{j}}$. Then $S_k^{(j)}(t,v,\bar \beta)=n_k^{-1}\sum_{i=1}^{n_k}S_{ki}^{(j)}(t,v,\bar \beta)$. We prove the lemma for the case when $j=0$. The proofs for $j=1$ and $2$ follow similarly. Let $\omega_{ki}=(X_{ki},Z_{ki})$. Then $\omega_{ki}$, $i=1, \ldots, n_k$, is a random sample from a probability distribution $ P_k $ on a measurable space $({\cal X}_k, {\cal A}_k)$, where ${\cal X}_k=[0,\tau] \times R^p$ and ${\cal A}_k$ is its Borel $\sigma$-field. 
Let ${\cal F}$ be the class of all coordinate projections $f_{t,v,\bar \beta}(\omega_{ki}):\; {\cal X}_k\longrightarrow R$, where $f_{t,v,\bar \beta}(\omega_{ki})=S_{ki}^{(0)}(t,v,\bar \beta)$, for $(t,v,\bar \beta) \in [0,\tau] \times [0,1]^2 \times \cal B$. Then $S_k^{(0)}(t,v,\bar \beta)=n_k^{-1}\sum_{i=1}^{n_k }f_{t,v,\bar \beta}(\omega_{ki})$. Let $\|f_{t,v,\bar \beta}\|_{P_k,r}=( P_k |f_{t,v,\bar \beta}|^r)^{1/r}=(E_k|S_{ki}^{(0)}(t,v,\bar \beta)|^r)^{1/r}$ be the $L_r( P_k )$-norm of $f_{t,v,\bar\beta}$. Next, we show that $\cal F$ is Glivenko--Cantelli (van der Vaart, 1998). Since $Z_{ki}(\cdot)$ is of bounded variation, for simplicity we assume that $Z_{ki}(\cdot)$ is a nonnegative monotone increasing process. In general, $Z_{ki}(\cdot)$ can be expressed as the difference of two nonnegative monotone increasing processes plus a constant. In this case, the class of functions of interest, $\cal F$, is the product of several functional classes. It is Glivenko--Cantelli if each of them is Glivenko--Cantelli. Let $\{t_h\}$, $\{v_j\}$ and $\{\bar \beta_m\}$ be the grid points of finite partitions of the intervals $[0,\tau]$, $[0,1]^2$, and ${\cal B}$, respectively. Let $\{t_{h^{'}},t_h\}$, $\{v_{j^{'}},v_j\}$ and $\{\bar \beta_{m^{'}},\bar\beta_m\}$ be the grid points on the opposite ends of a hyper-cube of the partitions such that $0\leq t_h-t_{h^{'}}\leq \epsilon$, $0\leq v_j-v_{j^{'}}\leq \epsilon$ and $0\leq \bar \beta_m-\bar \beta_{m^{'}}\leq \epsilon$ for $\epsilon > 0$. Define the bracketing functions $l_{h^{'}j^{'}m^{'}}=S_{ki}^{(0)}(t_{h^{'}}, v_{j^{'}},\bar \beta_{m^{'}})$ and $u_{hjm}=S_{ki}^{(0)}(t_{h}, v_{j},\bar \beta_{m})$. Then for any $f_{t,v,\bar \beta} \in {\cal F}$, there is a bracket $[l_{h^{'}j^{'}m^{'}},u_{hjm}]$ such that $f_{t,v,\bar \beta} \in [l_{h^{'}j^{'}m^{'}},u_{hjm}]$. 
Further, \begin{eqnarray*} \label{cc1} \|u_{hjm}-l_{h^{'}j^{'}m^{'}}\|_{P_k,2} & & \le \|S_{ki}^{(0)}(t_{h},v_{j},\bar\beta_{m})-S_{ki}^{(0)} (t_{h^{'}},v_{j^{'}},\bar\beta_{m^{'}})\|_{P_k,2}\\ & &=\|Y_{ki}(t_h)\exp\{\bar{\beta}_{m}^T\tilde Z_{ki}(t_h,v_j)\} -Y_{ki}(t_{h^{'}})\exp\{\bar{\beta}_{m^{'}}^T\tilde Z_{ki}(t_{h^{'}},v_{j^{'}})\}\|_{P_k,2}\\ & & \le [C_1\|t_h-t_{h^{'}}\|+C_2\|v_{j}-v_{j^{'}}\|+C_3\|\bar\beta_m-\bar\beta_{m^{'}}\|]^{1/2}\\ & & \leq C\epsilon^{1/2}, \end{eqnarray*} where $C_1$, $C_2$, $C_3$ and $C$ are some positive constants. Hence, the bracketing number $N_{[\, ]}(\epsilon^{1/2},{\cal F},L_2( P_k ))$ is of the polynomial order $(1/\epsilon)^{4p+3}$. Thus $N_{[\, ]}(\epsilon,{\cal F},L_2( P_k ))$ is of the order $(1/\epsilon)^{2(4p+3)}$. By the Glivenko--Cantelli Theorem (Theorem 19.4 of van der Vaart, 1998), $S_k^{(0)}(t,v,\bar\beta)$ converges in probability uniformly to $s_k^{(0)}(t,v,\bar\beta)$ for $(t,v,\bar\beta)\in [0,\tau]\times [0,1]^2\times {\cal B}$. \qquad\qed \bigskip \noindent \textsc{Proof of Theorem 2.1.} Let \begin{eqnarray*} \label{ll} \eta_n(\bar\beta)&=&n^{-1}(l(\bar\beta)-l(\bar\beta_0))\\ &=&n^{-1}\sum_{k=1}^K\sum_{i=1}^{n_k}\int_0^\tau\int_0^1\bigg[(\bar\beta-\bar\beta_0)^{T}\tilde Z_{ki}(s,u)-\log\bigg\{\frac{S_{k}^{(0)}(s,u,\bar\beta)} {S_{k}^{(0)}(s,u,\bar\beta_0)}\bigg\}\bigg]N_{ki}(ds,du). \end{eqnarray*} By condition (A.2), Lemma \ref{converge} and the uniform convergence of $n_k^{-1}\sum_{i=1}^{n_k} N_{ki}(t,v)\gop$ $ \int_0^t\int_0^v s_k^{(0)}(s,u,\bar\beta_0)\lambda_{0k}(s,u)\,dsdu$ (Gilbert, McKeague and Sun, 2004), we have $\eta_n(\bar\beta) \gop \eta(\bar\beta)$, where $$\eta(\bar\beta) =\sum_{k=1}^K p_k E\bigg(\int_0^\tau\int_0^1\bigg[(\bar\beta-\bar\beta_0)^{T}\tilde Z_{ki}(s,u)-\log\bigg\{\frac{s_{k}^{(0)}(s,u,\bar\beta)} {s_{k}^{(0)}(s,u,\bar\beta_0)}\bigg\}\bigg]\bigg)s_k^{(0)}(s,u,\bar\beta_0)\lambda_{0k}(s,u)\,dsdu,$$ uniformly in $\bar\beta\in {\cal B}$. 
Further, $-{\partial^2 \eta_n(\bar \beta)}/{\partial \bar\beta^2}=n^{-1}I(\bar\beta)$ converges in probability to $\Sigma(\bar\beta)$ uniformly in $\bar\beta \in \cal B$. The limiting matrix function is a positive definite matrix for $\bar\beta\in {\cal B}$ under the conditions (A.2) and (A.3). Hence, $\eta_n(\bar\beta)$ converges in probability to $\eta(\bar\beta)$, which is a concave function with a unique maximum at $\bar \beta_0$. Since $\hat{\bar\beta}$ is the maximizer of $\eta_n(\bar\beta)$, we have that $\hat{\bar \beta}$ converges in probability to $\bar \beta_0$ as $n\to \infty$; see van der Vaart (1998). \qquad\qed \bigskip \noindent \textsc{Proof of Theorem 2.2.} Note that $U(\hat{\bar\beta})-U(\bar{\beta}_0)=-I(\bar{\beta}^*)(\hat{\bar\beta}-\bar{\beta}_0)$, where $\bar{\beta}^*$ is on the line segment between $\hat{\bar\beta}$ and $\bar{\beta}_0$. By the uniform convergence of $n^{-1}I(\bar\beta)\gop \Sigma(\bar\beta)$ uniformly in $\bar\beta \in \cal B$ and the consistency of $\hat{\bar\beta}$ to $\bar\beta_0$, we have \begin{eqnarray} \label{taylor} & &n^{1/2}(\hat{\bar \beta}-\bar{\beta}_0)=(I(\bar{\beta}^*)/n)^{-1} n^{-1/2}U(\bar{\beta}_0) \nonumber\\ & & = (\Sigma(\bar\beta_0))^{-1} n^{-1/2} U(\bar{\beta}_0)+o_p(1). \label{Taylor1} \end{eqnarray} It remains to show that $n^{-1/2}U(\bar \beta_0)\god N(0,\Sigma(\bar \beta_0))$. Observe that \begin{eqnarray} \label{uu} n^{-1/2}U(\bar{\beta}_0) &=&n^{-1/2}\sum_{k=1}^K\sum_{i=1}^{n_k}\int_0^\tau\int_0^1\bigg[\tilde{Z}_{ki}(t,u)- \frac{S_k^{(1)}(t,u,\bar{\beta}_0)}{S_k^{(0)}(t,u,\bar{\beta}_0)}\bigg]M_{ki}(dt,du). 
\label{Score-Martingale1} \end{eqnarray} By Lemma \ref{converge} of this Supplementary Material and Lemma 2 of Gilbert, McKeague and Sun (2008), \begin{eqnarray} n^{-1/2}U(\bar{\beta}_0) &=&n^{-1/2}\sum_{k=1}^K\sum_{i=1}^{n_k}\int_0^\tau\int_0^1\bigg[\tilde{Z}_{ki} (t,u)-\frac{s_k^{(1)}(t,u,\bar{\beta}_0)}{s_k^{(0)}(t,u,\bar{\beta}_0)}\bigg]M_{ki}(dt,du)+o_p(1), \label{Score-Martingale2} \end{eqnarray} which converges in distribution to $N(0,\Sigma(\bar{\beta}_0))$. This completes the proof. \qquad\qed \bigskip \noindent \textsc{Proof of Theorem 2.3.} Note that \begin{eqnarray} & &\hat\Lambda_{0k}(t,v)-\Lambda_{0k}(t,v)\nonumber\\ & &=\int_0^t\int_0^v \bigg[ \frac{1}{n_k S_k^{(0)}(s,u,\hat{\bar\beta})}-\frac{1}{n_k S_k^{(0)}(s,u,\bar\beta_0)}\bigg] N_{k\cdot}(ds,du)+\int_0^t\int_0^v \frac{M_{k\cdot}(ds,du)}{n_k S_k^{(0)}(s,u,\bar\beta_0)}+o_p(n_k^{-1/2})\nonumber\\ & &=\int_0^t\int_0^v \frac{[S_k^{(1)}(s,u,\bar\beta_0)]^T(\bar\beta_0-\hat{\bar\beta}) }{n_k S_k^{(0)}(s,u, \hat{\bar\beta})S_k^{(0)}(s,u,\bar\beta_0)}\,N_{k\cdot}(ds,du) +\int_0^t\int_0^v \frac{M_{k\cdot}(ds,du)}{n_k S_k^{(0)}(s,u,\bar\beta_0)}+o_p(n_k^{-1/2})\nonumber\\ & &=\int_0^t\int_0^v \frac{[S_k^{(1)}(s, u,\bar\beta_0)]^T(\bar\beta_0-\hat{\bar\beta}) }{S_k^{(0)}(s, u,\hat{\bar\beta})}\lambda_{0k}(s,u)\,dsdu +\int_0^t\int_0^v \frac{M_{k\cdot}(ds,du)}{n_k S_k^{(0)}(s,u,\bar\beta_0)}+o_p(n_k^{-1/2}), \label{decomp_lambda} \end{eqnarray} where $M_{k\cdot}(t,v)=\sum_{i=1}^{n_k}M_{ki}(t,v)$. By the Slutsky Theorem, Lemma 2 of Gilbert, McKeague and Sun (2008), and Lemma A.2 of Sun et al.\ (2009), we have \begin{eqnarray*} & &\sqrt n \{\hat\Lambda_{0k}(t,v)-\Lambda_{0k}(t,v)\} \nonumber\\ & &=-\sqrt n (\hat{\bar\beta}-\bar\beta_0) \int_0^t\int_0^v \frac{[s_k^{(1)}(s, u,\bar\beta_0)] }{s_k^{(0)}(s, u,\bar\beta_0)}\lambda_{0k}(s,u)\,dsdu +\sqrt n \int_0^t\int_0^v \frac{M_{k\cdot}(ds,du)}{n_k s_k^{(0)}(s,u,\bar\beta_0)}+o_p(1). 
\label{decomp_lambda2} \end{eqnarray*} Since $M_{ki}(t,v)$'s are mark-specific martingales, by (\ref{Taylor1}) and (\ref{Score-Martingale1}), it is easy to check that $\sqrt n(\hat{\bar\beta}-\bar\beta_0)$ is asymptotically independent of the processes $\sqrt n\int_0^t\int_0^v \frac{M_{k\cdot}(ds,du)}{n_k s_k^{(0)}(s,u,\bar\beta_0)}$, $k=1,\ldots,K$. Following the proof of Lemma A.2 of Sun et al.\ (2009), it can be shown that the latter are asymptotically independent mean-zero Gaussian random fields, each of which has independent increments. \qquad\qed \noindent \textsc{Proof of Theorem 3.1.} The cumulative martingale residuals are defined as $$ \hat M_{ki}(t,v)=\int_0^t\int_0^v N_{ki}(ds,du)-Y_{ki}(s) \exp(\hat{\bar \beta}^T \tilde Z_{ki}(s,u)) \hat\Lambda_{0k}(ds,du), $$ where $\hat \Lambda_{0k}(t,v)=\int_0^t\int_0^v \{n_k S_k^{(0)}(s,u,\hat{\bar \beta})\}^{-1}{N_{k\cdot}(ds,du)} $ is the estimator of the doubly cumulative baseline function $\Lambda_{0k}(t,v)=\int_0^t\int_0^v \lambda_{0k}(s,u)dsdu$. It follows that \begin{eqnarray} \hat M_{ki}(t,v)&=&M_{ki}(t,v)-\int_0^t\int_0^v Y_{ki}(s) \exp(\hat{\bar\beta}^T\tilde Z_{ki}(s,u)) [\hat\Lambda_{0k}(ds,du)-\Lambda_{0k}(ds,du)]\nonumber\\ & & -\int_0^t\int_0^v Y_{ki}(s) [\exp(\hat{\bar\beta}^T\tilde Z_{ki}(s,u)) - \exp(\bar{\beta}^T_0\tilde Z_{ki}(s,u))]\Lambda_{0k}(ds,du). 
\label{decomp_res1} \end{eqnarray} \comment{ Note that \begin{eqnarray} & &\hat\Lambda_{0k}(t,v)-\Lambda_{0k}(t,v)\nonumber\\ & &=\int_0^t\int_0^v \bigg[ \frac{1}{n_k S_k^{(0)}(s,u,\hat{\bar\beta})}-\frac{1}{n_k S_k^{(0)}(s,u,\bar\beta_0)}\bigg] N_{k\cdot}(ds,du)+\int_0^t\int_0^v \frac{M_{k\cdot}(ds,du)}{n_k S_k^{(0)}(s,u,\bar\beta_0)}+o_p(n_k^{-1/2})\nonumber\\ & &=\int_0^t\int_0^v \frac{[S_k^{(1)}(s,u,\bar\beta_0)]^T(\bar\beta_0-\hat{\bar\beta}) }{n_k S_k^{(0)}(s,u, \hat{\bar\beta})S_k^{(0)}(s,u,\bar\beta_0)}\,N_{k\cdot}(ds,du) +\int_0^t\int_0^v \frac{M_{k\cdot}(ds,du)}{n_k S_k^{(0)}(s,u,\bar\beta_0)}+o_p(n_k^{-1/2})\nonumber\\ & &=\int_0^t\int_0^v \frac{[S_k^{(1)}(s, u,\bar\beta_0)]^T(\bar\beta_0-\hat{\bar\beta}) }{S_k^{(0)}(s, u,\hat{\bar\beta})}\lambda_{0k}(s,u)\,dsdu +\int_0^t\int_0^v \frac{M_{k\cdot}(ds,du)}{n_k S_k^{(0)}(s,u,\bar\beta_0)}+o_p(n_k^{-1/2}), \label{decomp_lambda} \end{eqnarray} where $M_{k\cdot}(t,v)=\sum_{i=1}^{n_k}M_{ki}(t,v)$. } By (\ref{decomp_lambda}), we have \begin{eqnarray} W_k(t,v,z)&=& n^{-1/2} \sum_{i=1}^{n_k} g_k(Z_{ki},z) M_{ki}(t,v)\nonumber\\ & & +n_k n^{-1/2} \int_0^t\int_0^v \frac{S_{kg}^{(0)}(s,u,z, \hat{\bar\beta}) [S_k^{(1)}(s,u, \bar\beta_0)]^T }{S_k^{(0)}(s,u,\bar\beta_0)} \lambda_{0k}(s,u) \,dsdu \,(\hat{\bar\beta}-\bar\beta_0) \nonumber\\ & & -n^{-1/2}\int_0^t\int_0^v \frac{S_{kg}^{(0)}(s,u,z,\hat{\bar\beta})} { S_k^{(0)}(s,u,\bar\beta_0)}\,M_{k\cdot}(ds,du)\nonumber\\ & &-n_k n^{-1/2}\int_0^t\int_0^v [S_{kg}^{(1)}(s, u,z,\bar\beta_0)]^T \lambda_{0k}(s,u)\,dsdu \,(\hat{\bar\beta}-\bar\beta_0) +o_p(1). \label{decomp1_res} \end{eqnarray} Replacing $\hat{\bar\beta}$ in $S_{kg}^{(0)}(s,u,z, \hat{\bar\beta})$ by $\bar\beta_0$ results in a process that is equivalent to $W_k(t,v,z)$ in probability by the Slutsky Theorem for the second term and by Lemma 2 of Gilbert, McKeague and Sun (2008) for the third term of (\ref{decomp1_res}). 
From the proof of Theorem 2.2, we have $n^{1/2}(\hat{\bar\beta}-\bar\beta_0)=(\Sigma(\bar{\beta}_0))^{-1} n^{-1/2} U(\bar\beta_0)+o_p(1)$. The second term of (\ref{decomp1_res}) equals $$ (n_k/n)\bigg\{\int_0^t\int_0^v \frac{[S_k^{(1)} (s,u,\bar{\beta}_0)\otimes S_{kg}^{(0)}(s,u,z,\bar \beta_0)]^T}{S_k^{(0)}(s,u,\bar{\beta}_0)}\lambda_{0k}(s,u)\,dsdu\bigg\}(\Sigma(\bar{\beta}_0))^{-1} n^{-1/2} U(\bar\beta_0)+o_p(1).$$ Similarly, the fourth term of (\ref{decomp1_res}) is equal to \begin{eqnarray*} & & (n_k/n)\bigg\{\int_0^t\int_0^v {[S_{kg}^{(1)}(s,u,z,\bar{\beta}_0)]^T\lambda_{0k}(s,u)}\,dsdu\bigg\} (\Sigma(\bar{\beta}_0))^{-1} n^{-1/2} U(\bar\beta_0) +o_p(1). \end{eqnarray*} Bringing the above expressions into (\ref{decomp1_res}), we have \begin{eqnarray} & & W_k(t,v,z)\nonumber \\ & & =n^{-1/2} \sum_{i=1}^{n_k} \int_0^t\int_0^v \bigg [g_k(Z_{ki},z)-\frac{S_{kg}^{(0)}(s,u,z,\bar{\beta}_0) } { S_k^{(0)}(s,u,\bar{\beta}_0)}\bigg ]\,M_{ki}(ds,du)\nonumber\\ & &\quad + {n_k}/{n}\int_0^t \int_0^v \bigg(\frac{S_k^{(1)}(s,u,\bar{\beta}_0) \otimes S_{kg}^{(0)}(s,u,z,\bar{\beta}_0) }{S_k^{(0)}(s,u,\bar{\beta}_0)} - S_{kg}^{(1)}(s,u,z,\bar{\beta}_0)\bigg)^T\lambda_{0k}(s,u)\,dsdu\nonumber\\ & &\quad (\Sigma(\bar{\beta}_0))^{-1} n^{-1/2} U(\bar\beta_0) +o_p(1)\nonumber\\ & &= n^{-1/2}\sum_{i=1}^{n_k} \int_0^t\int_0^v \bigg [g_k(Z_{ki},z)-\frac{S_{kg}^{(0)}(s,u,z, \bar{\beta}_0) } { S_k^{(0)}(s,u,\bar{\beta}_0)}\bigg ]\,M_{ki}(ds,du)\nonumber \\ & & \quad +(R_k(t,v,z))^T (\Sigma(\bar{\beta}_0))^{-1} n^{-1/2}\sum_{l=1}^K\sum_{i=1}^{n_l} \bigg\{\int_0^\tau\int_0^1 \bigg [\tilde Z_{li}(s,u)- \frac{S_l^{(1)}(s,u,\bar{\beta}_0)}{S_l^{(0)}(s,u, \bar{\beta}_0)} \bigg]M_{li}(ds,du)\bigg\}^T \nonumber \\ & & \quad +o_p(1)\nonumber\\ & &= n^{-1/2}\sum_{l=1}^K\sum_{i=1}^{n_l} \int_0^\tau\int_0^v I(l=k)I(s\le t) \bigg [g_l(Z_{li},z)-\frac{S_{lg}^{(0)}(s,u,z, \bar{\beta}_0) } { S_l^{(0)}(s,u,\bar{\beta}_0)}\bigg ]\,M_{li}(ds,du) \nonumber\\ & & \quad +(R_k(t,v,z))^T 
(\Sigma(\bar{\beta}_0))^{-1} n^{-1/2}\sum_{l=1}^K\sum_{i=1}^{n_l} \bigg\{\int_0^\tau\int_0^1 \bigg [\tilde Z_{li}(s,u)- \frac{S_l^{(1)}(s,u,\bar{\beta}_0)}{S_l^{(0)}(s,u, \bar{\beta}_0)} \bigg]M_{li}(ds,du)\bigg\}^T \nonumber\\ & & \quad +o_p(1). \label{decomp3_res} \end{eqnarray} By the uniform convergence of $S_k^{(0)}(s,u,\bar{\beta}_0)$, $S_k^{(1)}(s,u,\bar{\beta}_0)$, $S_{kg}^{(0)}(s,u,z,\bar{\beta}_0)$ and $S_{kg}^{(1)}(s,u,z,\bar{\beta}_0)$ to $s_k^{(0)}(s,u,\bar{\beta}_0)$, $s_k^{(1)}(s,u,\bar{\beta}_0)$, $s_{kg}^{(0)}(s,u,z,\bar{\beta}_0)$ and $s_{kg}^{(1)}(s,u,z,\bar{\beta}_0)$ in $(s,u)\in [0,\tau]\times[0,1]^2$ in probability, respectively, and by the weak convergence of $n_k^{-1/2}\sum_{i=1}^{n_k}M_{ki}(t,v)$ for $k=1,\ldots,K$, the terms $S_k^{(0)}(s,u,\bar{\beta}_0)$, $S_k^{(1)}(s,u,\bar{\beta}_0)$, $S_{kg}^{(0)}(s,u,z,\bar{\beta}_0)$ and $S_{kg}^{(1)}(s,u,z,\bar{\beta}_0)$ can be replaced by their expected values, respectively. This completes the proof. \qquad\qed \end{document}