Appendix A
Proof of Lemma 2.1
By the definition of covariance, we have
$$ \begin{aligned} Cov(Z_{1i}^{*} ,Z_{2i}^{*} ) & = [E(Z_{1i}^{*} Z_{2i}^{*} ) - E(Z_{1i}^{*} )E(Z_{2i}^{*} )] \\ & = PE[(Y_{1i} S_{1}^{2} + Y_{2i} S_{1} S_{2} + FS_{1} )(Y_{1i} S_{1} + Y_{2i} S_{2} + F)] \\ & \quad + (1 - P)E[(Y_{1i} S_{1} S_{2} + Y_{2i} S_{2}^{2} + FS_{2} )(Y_{1i} S_{1} + Y_{2i} S_{2} + F)] \\ & \quad - E(Y_{1i} S_{1} + Y_{2i} S_{2} + F)E[P(Y_{1i} S_{1}^{2} + Y_{2i} S_{1} S_{2} + FS_{1} ) \\ & \quad + (1 - P)(Y_{1i} S_{1} S_{2} + Y_{2i} S_{2}^{2} + FS_{2} )] \\ & = \sigma_{{Z_{1} Z_{2} }} + P\left[ {F\{ \mu_{{Y_{1} }} (\gamma_{20} + \theta_{1}^{2} ) + \mu_{{Y_{2} }} \theta_{1} \theta_{2} \} + F\{ \mu_{{Y_{1} }} (\gamma_{20} + \theta_{1}^{2} ) + \mu_{{Y_{2} }} \theta_{1} \theta_{2} \} + F^{2} \theta_{1} } \right] \\ & \quad + (1 - P)\left[ {F\{ \mu_{{Y_{1} }} \theta_{1} \theta_{2} + \mu_{{Y_{2} }} (\gamma_{02} + \theta_{2}^{2} )\} + F\{ \mu_{{Y_{1} }} \theta_{1} \theta_{2} + \mu_{{Y_{2} }} (\gamma_{02} + \theta_{2}^{2} )\} + F^{2} \theta_{2} } \right] \\ & \quad - F\left[ {P\{ \mu_{{Y_{1} }} (\gamma_{20} + \theta_{1}^{2} ) + \mu_{{Y_{2} }} \theta_{1} \theta_{2} \} + (1 - P)\{ \mu_{{Y_{1} }} \theta_{1} \theta_{2} + \mu_{{Y_{2} }} (\gamma_{02} + \theta_{2}^{2} )\} } \right] \\ & \quad - \left( {\mu_{{Y_{1} }} \theta_{1} + \mu_{{Y_{2} }} \theta_{2} } \right)\left\{ {PF\theta_{1} + (1 - P)F\theta_{2} } \right\} - PF^{2} \theta_{1} - (1 - P)F^{2} \theta_{2} \\ & = \sigma_{{Z_{1} Z_{2} }} + F[P\mu_{{Y_{1} }} \gamma_{20} + P\mu_{{Y_{1} }} \theta_{1}^{2} + (1 - P)\mu_{{Y_{2} }} \gamma_{02} + (1 - P)\mu_{{Y_{2} }} \theta_{2}^{2}\\ &\quad - P\mu_{{Y_{1} }} \theta_{1}^{2} - (1 - P)\mu_{{Y_{2} }} \theta_{2}^{2} ] \\ & = \sigma_{{Z_{1} Z_{2} }} + F[P\mu_{{Y_{1} }} \gamma_{20} + (1 - P)\mu_{{Y_{2} }} \gamma_{02} ], \\ \end{aligned} $$
(A.1)
which proves the lemma.□
Proof of Lemma 2.2
Although its proof is obvious because adding a constant \( F \) to \( Z_{1i} \) will not change its variance, we also demonstrate it mathematically as follows: By the definition of variance, the variance of \( Z_{1i}^{*} \) is given by
$$ \begin{aligned} \sigma_{{Z_{1}^{*} }}^{2} & = E(Z_{1i}^{*2} ) - (E(Z_{1i}^{*} ))^{2} \\ & = E[(Y_{1i} S_{1} + Y_{2i} S_{2} + F)^{2} ] - [E(Y_{1i} S_{1} + Y_{2i} S_{2} + F)]^{2} \\ & = E[(Y_{1i}^{2} S_{1}^{2} + Y_{2i}^{2} S_{2}^{2} + F^{2} + 2Y_{1i} Y_{2i} S_{1} S_{2} + 2Y_{1i} S_{1} F + 2Y_{2i} S_{2} F)]\\ & \quad - [\mu_{{Y_{1} }} \theta_{1} + \mu_{{Y_{2} }} \theta_{2} + F]^{2} \\ & = (\sigma_{{Y_{1} }}^{2} + \mu_{{Y_{1} }}^{2} )(\gamma_{20} + \theta_{1}^{2} ) + (\sigma_{{Y_{2} }}^{2} + \mu_{{Y_{2} }}^{2} )(\gamma_{02} + \theta_{2}^{2} ) + 2(\sigma_{{Y_{1} Y_{2} }} + \mu_{{Y_{1} }} \mu_{{Y_{2} }} )\theta_{1} \theta_{2} \\ & \quad + F^{2} + 2\mu_{{Y_{2} }} \theta_{2} F + 2\mu_{{Y_{1} }} F\theta_{1} \\ & \quad - [\mu_{{Y_{1} }}^{2} \theta_{1}^{2} + \mu_{{Y_{2} }}^{2} \theta_{2}^{2} + F^{2} + 2\mu_{{Y_{1} }} \theta_{1} F + 2\mu_{{Y_{2} }} \theta_{2} F + 2\mu_{{Y_{1} }} \mu_{{Y_{2} }} \theta_{1} \theta_{2} ] \\ & = \gamma_{20} (\sigma_{{Y_{1} }}^{2} + \mu_{{Y_{1} }}^{2} ) + \gamma_{02} (\sigma_{{Y_{2} }}^{2} + \mu_{{Y_{2} }}^{2} ) + \theta_{1}^{2} \sigma_{{Y_{1} }}^{2} + \theta_{2}^{2} \sigma_{{Y_{2} }}^{2} + 2\theta_{1} \theta_{2} \sigma_{{Y_{1} Y_{2} }} \\ & = \sigma_{{Z_{1} }}^{2}, \\ \end{aligned} $$
(A.2)
which proves the lemma.□
Proof of Lemma 2.3
By the definition of variance, we have
$$ \begin{aligned} \sigma_{{Z_{2i}^{*} }}^{2} & = E(Z_{2i}^{*2} ) - (E(Z_{2i}^{*} ))^{2} \\ & = PE[S_{1}^{2} Y_{1i} + S_{1} S_{2} Y_{2i} + S_{1} F]^{2} + (1 - P)E[S_{1} S_{2} Y_{1i} + S_{2}^{2} Y_{2i} + S_{2} F]^{2}\\ & \quad - [PE(S_{1}^{2} Y_{1i} + S_{1} S_{2} Y_{2i} + S_{1} F) + (1 - P)E(S_{1} S_{2} Y_{1i} + S_{2}^{2} Y_{2i} + S_{2} F)]^{2} \\ & = PE[S_{1}^{4} Y_{1i}^{2} + S_{1}^{2} S_{2}^{2} Y_{2i}^{2} + S_{1}^{2} F^{2} + 2S_{1}^{3} S_{2} Y_{1i} Y_{2i} + 2S_{1}^{3} Y_{1i} F + 2S_{1}^{2} S_{2} Y_{2i} F] \\ & \quad + (1 - P)E[S_{1}^{2} S_{2}^{2} Y_{1i}^{2} + S_{2}^{4} Y_{2i}^{2} + S_{2}^{2} F^{2} + 2S_{1} S_{2}^{3} Y_{1i} Y_{2i} + 2S_{1} S_{2}^{2} Y_{1i} F + 2S_{2}^{3} Y_{2i} F] \\ & \quad - [P\{ (\gamma_{20} + \theta_{1}^{2} )\mu_{{Y_{1} }} + \theta_{1} \theta_{2} \mu_{{Y_{2} }} + \theta_{1} F\} + (1 - P)\{ \theta_{1} \theta_{2} \mu_{{Y_{1} }} + (\gamma_{02} + \theta_{2}^{2} )\mu_{{Y_{2} }} + \theta_{2} F\} ]^{2} \\ & = PE[S_{1}^{4} Y_{1i}^{2} + S_{1}^{2} S_{2}^{2} Y_{2i}^{2} + S_{1}^{2} F^{2} + 2S_{1}^{3} S_{2} Y_{1i} Y_{2i} + 2S_{1}^{3} Y_{1i} F + 2S_{1}^{2} S_{2} Y_{2i} F] \\ & \quad + (1 - P)E[S_{1}^{2} S_{2}^{2} Y_{1i}^{2} + S_{2}^{4} Y_{2i}^{2} + S_{2}^{2} F^{2} + 2S_{1} S_{2}^{3} Y_{1i} Y_{2i} + 2F(S_{2}^{3} Y_{2i} + S_{1} S_{2}^{2} Y_{1i} )] \\ & \quad - [P\{ (\gamma_{20} + \theta_{1}^{2} )\mu_{{Y_{1} }} + \theta_{1} \theta_{2} \mu_{{Y_{2} }} \} + (1 - P)\{ \theta_{1} \theta_{2} \mu_{{Y_{1} }} + (\gamma_{02} + \theta_{2}^{2} )\mu_{{Y_{2} }} \} + F\{ P\theta_{1} + (1 - P)\theta_{2} \} ]^{2} \\ & = P[E(S_{1}^{4} )(\sigma_{{Y_{1} }}^{2} + \mu_{{Y_{1} }}^{2} ) + (\gamma_{20} + \theta_{1}^{2} )(\gamma_{02} + \theta_{2}^{2} )(\sigma_{{Y_{2} }}^{2} + \mu_{{Y_{2} }}^{2} ) + 2E(S_{1}^{3} )\theta_{2} (\sigma_{{Y_{1} Y_{2} }} + \mu_{{Y_{1} }} \mu_{{Y_{2} }} )] \\ & \quad + (1 - P)[(\gamma_{20} + \theta_{1}^{2} )(\gamma_{02} + \theta_{2}^{2} )(\sigma_{{Y_{1} }}^{2} + \mu_{{Y_{1} }}^{2} ) + E(S_{2}^{4} )(\sigma_{{Y_{2} }}^{2} + \mu_{{Y_{2} }}^{2} ) + 2\theta_{1} E(S_{2}^{3} )(\sigma_{{Y_{1} Y_{2} }} + \mu_{{Y_{1} }} \mu_{{Y_{2} }} )] \\ & \quad - [P\{ (\gamma_{20} + \theta_{1}^{2} )\mu_{{Y_{1} }} + \theta_{1} \theta_{2} \mu_{{Y_{2} }} \} + (1 - P)\{ \theta_{1} \theta_{2} \mu_{{Y_{1} }} + (\gamma_{02} + \theta_{2}^{2} )\mu_{{Y_{2} }} \} ]^{2} \\ & \quad + PF^{2} (\gamma_{20} + \theta_{1}^{2} ) + (1 - P)F^{2} (\gamma_{02} + \theta_{2}^{2} ) - F^{2} \{ P\theta_{1} + (1 - P)\theta_{2} \}^{2} \\ & \quad + 2F[P(E(S_{1}^{3} )\mu_{{Y_{1} }} + (\gamma_{20} + \theta_{1}^{2} )\theta_{2} \mu_{{Y_{2} }} ) + (1 - P)(\theta_{1} (\gamma_{02} + \theta_{2}^{2} )\mu_{{Y_{1} }} + E(S_{2}^{3} )\mu_{{Y_{2} }} ) \\ & \quad - (P\theta_{1} + (1 - P)\theta_{2} )\{ P((\gamma_{20} + \theta_{1}^{2} )\mu_{{Y_{1} }} + \theta_{1} \theta_{2} \mu_{{Y_{2} }} ) + (1 - P)(\theta_{1} \theta_{2} \mu_{{Y_{1} }} + (\gamma_{02} + \theta_{2}^{2} )\mu_{{Y_{2} }} )\} ] \\ & = \sigma_{{Z_{2} }}^{2} + F^{2} [P(\gamma_{20} + \theta_{1}^{2} ) + (1 - P)(\gamma_{02} + \theta_{2}^{2} ) - (P\theta_{1} + (1 - P)\theta_{2} )^{2} ] + 2F[P(E(S_{1}^{3} )\mu_{{Y_{1} }} \\ & \quad + (\gamma_{20} + \theta_{1}^{2} )\theta_{2} \mu_{{Y_{2} }} ) + (1 - P)(\theta_{1} (\gamma_{02} + \theta_{2}^{2} )\mu_{{Y_{1} }} + E(S_{2}^{3} )\mu_{{Y_{2} }} ) - (P\theta_{1} + (1 - P)\theta_{2} ) \\ & \quad \{ P((\gamma_{20} + \theta_{1}^{2} )\mu_{{Y_{1} }} + \theta_{1} \theta_{2} \mu_{{Y_{2} }} ) + (1 - P)(\theta_{1} \theta_{2} \mu_{{Y_{1} }} + (\gamma_{02} + \theta_{2}^{2} )\mu_{{Y_{2} }} )\} ] \\ & = \sigma_{{Z_{2} }}^{2} + F^{2} T_{1} + 2FT_{2}, \\ \end{aligned} $$
(A.3)
which proves the lemma.□
Proof of Theorem 2.1
It follows from the fact that
$$ E\left( {\bar{Z}_{1}^{*} } \right) - F = \theta_{1} \mu_{{Y_{1} }} + \theta_{2} \mu_{{Y_{2} }} $$
and
$$ E(\bar{Z}_{2}^{*} ) - F\{ P\theta_{1} + (1 - P)\theta_{2} \} = \{ P(\gamma_{20} + \theta_{1}^{2} ) + (1 - P)\theta_{1} \theta_{2} \} \mu_{{Y_{1} }} + [P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )]\mu_{{Y_{2} }}. $$
□
Proof of Theorem 2.2
The variance of the estimator \( \hat{\mu }_{{Y_{1} }}^{*} \) is given by
$$ \begin{aligned} V(\hat{\mu }_{{Y_{1} }}^{*} ) & = V\left[ {\frac{{\{ P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )\} \bar{Z}_{1}^{*} - \theta_{2} \bar{Z}_{2}^{*} - F(1 - P)\gamma_{02} }}{{(1 - P)\gamma_{02} \theta_{1} - P\theta_{2} \gamma_{20} }}} \right] \\ & = \left[ {\frac{{[P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )]^{2} V(\bar{Z}_{1}^{*} ) + \theta_{2}^{2} V(\bar{Z}_{2}^{*} ) - 2\theta_{2} [P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )]Cov(\bar{Z}_{1}^{*} ,\bar{Z}_{2}^{*} )}}{{[(1 - P)\gamma_{02} \theta_{1} - P\theta_{2} \gamma_{20} ]^{2} }}} \right] \\ & = \frac{{[P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )]^{2} \sigma_{{Z_{1}^{*} }}^{2} + \theta_{2}^{2} \sigma_{{Z_{2}^{*} }}^{2} - 2\theta_{2} [P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )]\sigma_{{Z_{1}^{ * } Z_{2}^{ * } }} }}{{n[(1 - P)\gamma_{02} \theta_{1} - P\theta_{2} \gamma_{20} ]^{2} }} \\ & = \frac{1}{{[(1 - P)\theta_{1} \gamma_{02} - P\theta_{2} \gamma_{20} ]^{2} }}\left[ {\{ P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )\}^{2} \sigma_{{Z_{1} }}^{2} + \theta_{2}^{2} \sigma_{{Z_{2} }}^{2}} \right.\\ &\quad \left.{ - 2\theta_{2}\{ P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )\} \sigma_{{Z_{1} Z_{2} }} } \right. \\ & \quad \left. { + \theta_{2}^{2} F^{2} T_{1} + 2F\theta_{2}^{2} T_{2} - 2F\theta_{2} \{ P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )\} \{ P\mu_{{Y_{1} }} \gamma_{20} + (1 - P)\mu_{{Y_{2} }} \gamma_{02} \} } \right] \\ & = V(\hat{\mu }_{{Y_{1} }} ) + \frac{{\theta_{2}^{2} F^{2} T_{1} + 2F\theta_{2}^{2} T_{2} - 2F\theta_{2} \{ P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )\} \{ P\mu_{{Y_{1} }} \gamma_{20} + (1 - P)\mu_{{Y_{2} }} \gamma_{02} \} }}{{n[(1 - P)\theta_{1} \gamma_{02} - P\theta_{2} \gamma_{20} ]^{2} }}. \\ \end{aligned} $$
(A.4)
Now the variance in (A.4) will be minimum for the optimum value of \( F \), given by setting \( \frac{{\partial V(\hat{\mu }_{{Y_{1} }}^{*} )}}{\partial F} = 0 \), which gives
$$ \frac{{2F\theta_{2}^{2} T_{1} + 2\theta_{2}^{2} T_{2} - 2\theta_{2} \{ P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )\} \{ P\mu_{{Y_{1} }} \gamma_{20} + (1 - P)\mu_{{Y_{2} }} \gamma_{02} \} }}{{n[(1 - P)\theta_{1} \gamma_{02} - P\theta_{2} \gamma_{20} ]^{2} }} = 0. $$
This will be satisfied either if \( \theta_{2} = 0 \) or if
$$ F = - \frac{{\theta_{2} T_{2} - \{ P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )\} \{ P\mu_{{Y_{1} }} \gamma_{20} + (1 - P)\mu_{{Y_{2} }} \gamma_{02} \} }}{{T_{1} \theta_{2} }} .$$
(A.5)
We note that if the value of \( \theta_{2} \) is zero, then \( F \) is undefined; also such a choice of \( \theta_{2} = 0 \) shows that:
$$ V(\hat{\mu }_{{Y_{1} }}^{*} ) = V(\hat{\mu }_{{Y_{1} }} ) .$$
(A.6)
Thus, an investigator should not choose \( \theta_{2} = 0 \) in the proposed new re-scrambling model. For \( \theta_{2} \ne 0 \), there exists a good guess of \( F \) which investigator can make to achieve the minimum variance of \( \hat{\mu }_{{Y_{1} }}^{*} \), given by
$$ \min V(\hat{\mu }_{{Y_{1} }}^{ * } ) = V(\hat{\mu }_{{Y_{1} }} ) - \frac{{[\theta_{2} T_{2} - \{ P\theta_{1} \theta_{2} + (1 - P)(\gamma_{02} + \theta_{2}^{2} )\} \{ P\mu_{{Y_{1} }} \gamma_{20} + (1 - P)\mu_{{Y_{2} }} \gamma_{02} \} ]^{2} }}{{nT_{1} [(1 - P)\theta_{1} \gamma_{02} - P\theta_{2} \gamma_{20} ]^{2} }}, $$
which proves the theorem.□
Proof of Theorem 2.3
The variance of the estimator \( \hat{\mu }_{{Y_{2} }}^{*} \) is given by
$$ \begin{aligned} V(\hat{\mu }_{{Y_{2} }}^{*} ) & = \frac{{\theta_{1}^{2} \sigma_{{Z_{2}^{ * } }}^{2} + [P(\gamma_{20} + \theta_{1}^{2}) + (1 - P)\theta_{1} \theta_{2} ]^{2} \sigma_{{Z_{1}^{ * } }}^{2} - 2\theta_{1} [P(\gamma_{20} + \theta_{1}^{2} ) + (1 - P)\theta_{1} \theta_{2} ]\sigma_{{Z_{1}^{ * } Z_{2}^{ * } }} }}{{n[(1 - P)\theta_{1} \gamma_{02} - P\theta_{2} \gamma_{20} ]^{2} }} \\ & = \frac{1}{{n\{ (1 - P)\theta_{1} \gamma_{02} - P\gamma_{20} \theta_{2} \}^{2} }}\left[ {\theta_{1}^{2} \sigma_{{Z_{2} }}^{2} } \right. + \left\{ {P(\gamma_{20} + \theta_{1}^{2} ) + (1 - P)\theta_{1} \theta_{2} } \right\}^{2} \sigma_{{Z_{1} }}^{2} \\ & \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad - 2\theta_{1} \left\{ {P(\gamma_{20} + \theta_{1}^{2} ) + (1 - P)\theta_{1} \theta_{2} } \right\}\sigma_{{Z_{1} Z_{2} }} + F^{2} \theta_{1}^{2} T_{1} + 2FT_{2} \theta_{1}^{2} \\ & \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \quad \left. { - 2F\theta_{1} \{ P(\gamma_{20} + \theta_{1}^{2} ) + (1 - P)\theta_{1} \theta_{2} \} \{ P\mu_{{Y_{1} }} \gamma_{20} + (1 - P)\mu_{{Y_{2} }} \gamma_{02} \} } \right] \\ & = V(\hat{\mu }_{{Y_{2} }} ) + \frac{{\theta_{1}^{2} F^{2} T_{1} + 2F\theta_{1}^{2} T_{2} - 2F\theta_{1} \{ P(\gamma_{20} + \theta_{1}^{2} ) + (1 - P)\theta_{1} \theta_{2} \} \{ P\mu_{{Y_{1} }} \gamma_{20} + (1 - P)\mu_{{Y_{2} }} \gamma_{02} \} }}{{n[(1 - P)\theta_{1} \gamma_{02} - P\theta_{2} \gamma_{20} ]^{2} }}. \\ \end{aligned} $$
(A.7)
Now the variance in (A.7) will be minimum for the optimum value of \( F \), given by setting
$$ \frac{{\partial V(\hat{\mu }_{{Y_{2} }}^{*} )}}{\partial F} = 0, $$
which gives
$$ \frac{{2\theta_{1}^{2} FT_{1} + 2\theta_{1}^{2} T_{2} - 2\theta_{1} \{ P(\gamma_{20} + \theta_{1}^{2} ) + (1 - P)\theta_{1} \theta_{2} \} \{ P\mu_{{Y_{1} }} \gamma_{20} + (1 - P)\mu_{{Y_{2} }} \gamma_{02} \} }}{{n[(1 - P)\theta_{1} \gamma_{02} - P\theta_{2} \gamma_{20} ]^{2} }} = 0. $$
This will be satisfied either if \( \theta_{1} = 0 \) or if
$$ F = - \frac{{\theta_{1} T_{2} - \{ P(\gamma_{20} + \theta_{1}^{2} ) + (1 - P)\theta_{1} \theta_{2} \} \{ P\mu_{{Y_{1} }} \gamma_{20} + (1 - P)\mu_{{Y_{2} }} \gamma_{02} \} }}{{T_{1} \theta_{1} }} .$$
(A.8)
We note that if the value of \( \theta_{1} \) is zero, then \( F \) is undefined; also such a choice of \( \theta_{1} = 0 \) shows that:
$$ V(\hat{\mu }_{{Y_{2} }}^{*} ) = V(\hat{\mu }_{{Y_{2} }} ). $$
(A.9)
Thus, an investigator should not choose \( \theta_{1} = 0 \) in the proposed new re-scrambling model. For \( \theta_{1} \ne 0 \), there exists a good guess of \( F \) which investigator can make to achieve the minimum variance of \( \hat{\mu }_{{Y_{2} }}^{*} \) given by
$$ \min V(\hat{\mu }_{{Y_{2} }}^{ * } ) = V(\hat{\mu }_{{Y_{2} }} ) - \frac{{[\theta_{1} T_{2} - \{ P(\gamma_{20} + \theta_{1}^{2} ) + (1 - P)\theta_{1} \theta_{2} \} \{ P\mu_{{Y_{1} }} \gamma_{20} + (1 - P)\mu_{{Y_{2} }} \gamma_{02} \} ]^{2} }}{{nT_{1} [(1 - P)\theta_{1} \gamma_{02} - P\theta_{2} \gamma_{20} ]^{2} }}, $$
which proves the theorem.□
Appendix B