Power and sample size for case-cohort design
The power of the test is according to
Phi(Z_alpha + m^0.5 * theta * sqrt(p_1 p_2 p_D / (q + (1-q) p_D)))
where alpha is the significance level, theta is the log-hazard ratio for two groups, p_j, j=1, 2, are the proportion of the two groups in the population. m is the total number of subjects in the subcohort, p_D is the proportion of the failures in the full cohort, and q is the sampling fraction of the subcohort.
Alternatively, the sample size required for the subcohort is
m=nBp_D/(n-B(1-p_D))
where B = (Z_{1-alpha} + Z_beta)^2 / (theta^2 p_1 p_2 p_D), and n is the size of the cohort.
When infeasible configurations are specified, a sample size of -999 is returned.
ccsize(n,q,pD,p1,alpha,theta,power=NULL,verbose=FALSE)
n |
the total number of subjects in the cohort |
q |
the sampling fraction of the subcohort |
pD |
the proportion of the failures in the full cohort |
p1 |
proportions of the two groups (p2=1-p1) |
alpha |
significance level |
theta |
log-hazard ratio for two groups |
power |
if specified, the power for which sample size is calculated |
verbose |
error messages are explicitly printed out |
The returned value indicates the power or the required subcohort sample size.
Cai J, Zeng D. Sample size/power calculation for case-cohort studies. Biometrics 2004, 60:1015-1024
Programmed for EPIC study
Jing Hua Zhao
## Not run:
# Table 1 of Cai & Zeng (2004).
# Reproduce Table 1 of Cai & Zeng (2004): power for a cohort of n = 1000
# over all combinations of failure proportion, group proportion,
# log-hazard ratio and sampling fraction.
outfile <- "table1.txt"
cat("n","pD","p1","theta","q","power\n",file=outfile,sep="\t")
alpha <- 0.05
n <- 1000
# The loop variables (pD, p1, theta, q) deliberately remain in the global
# environment; later sections of this example reuse them.
for (pD in c(0.10, 0.05)) {
  for (p1 in c(0.3, 0.5)) {
    for (theta in c(0.5, 1.0)) {
      for (q in c(0.1, 0.2)) {
        power <- ccsize(n,q,pD,p1,alpha,theta)
        cat(n,"\t",pD,"\t",p1,"\t",theta,"\t",q,"\t",signif(power,3),"\n",
            file=outfile,append=TRUE)
      }
    }
  }
}
# Second half of Table 1: a larger cohort (n = 5000) with rarer failures
# and smaller sampling fractions.
n <- 5000
for (pD in c(0.05, 0.01)) {
  for (p1 in c(0.3, 0.5)) {
    for (theta in c(0.5, 1.0)) {
      for (q in c(0.01, 0.02)) {
        power <- ccsize(n,q,pD,p1,alpha,theta)
        cat(n,"\t",pD,"\t",p1,"\t",theta,"\t",q,"\t",signif(power,3),"\n",
            file=outfile,append=TRUE)
      }
    }
  }
}
# Read the accumulated results back as a data frame, then remove the file.
table1 <- read.table(outfile, header = TRUE, sep = "\t")
unlink(outfile)
# ARIC study
# ARIC study: power at the observed subcohort sizes, and the subcohort
# size required to reach 80% power, for three candidate hazard ratios.
outfile <- "aric.txt"
n <- 15792
pD <- 0.03
p1 <- 0.25
alpha <- 0.05
theta <- c(1.35,1.40,1.45)   # hazard ratios (log taken in the calls below)
beta1 <- 0.8                 # target power for the sample-size calculation
s_nb <- c(1463,722,468)      # observed subcohort sizes, one per hazard ratio
cat("n","pD","p1","hr","q","power","ssize\n",file=outfile,sep="\t")
# seq_along() keeps the loop in sync with theta/s_nb (was hard-coded 1:3).
for(i in seq_along(theta))
{
q <- s_nb[i]/n               # sampling fraction implied by the subcohort size
power <- ccsize(n,q,pD,p1,alpha,log(theta[i]))
ssize <- ccsize(n,q,pD,p1,alpha,log(theta[i]),beta1)
cat(n,"\t",pD,"\t",p1,"\t",theta[i],"\t",q,"\t",signif(power,3),"\t",ssize,"\n",
file=outfile,append=TRUE)
}
aric<-read.table(outfile,header=TRUE,sep="\t")
unlink(outfile)
# EPIC study
# EPIC study: required subcohort size by direct calculation at a
# genome-wide significance level, over a grid of design assumptions.
outfile <- "epic.txt"
n <- 25000
alpha <- 0.00000005
power <- 0.8
s_pD <- c(0.3, 0.2, 0.1, 0.05)     # candidate failure proportions
s_p1 <- seq(0.1, 0.5, by = 0.1)    # candidate group proportions
s_hr <- seq(1.1, 1.4, by = 0.1)    # candidate hazard ratios
cat("n","pD","p1","hr","alpha","ssize\n",file=outfile,sep="\t")
# direct calculation
# NOTE(review): q below is the leftover value from the ARIC loop above;
# presumably ccsize() ignores q when power is supplied -- TODO confirm.
for (pD in s_pD) {
  for (p1 in s_p1) {
    for (hr in s_hr) {
      ssize <- ccsize(n,q,pD,p1,alpha,log(hr),power)
      # Record only feasible designs (ccsize returns -999 otherwise).
      if (ssize>0) cat(n,"\t",pD,"\t",p1,"\t",hr,"\t",alpha,"\t",ssize,"\n",
        file=outfile,append=TRUE)
    }
  }
}
epic <- read.table(outfile, header = TRUE, sep = "\t")
unlink(outfile)
# exhaustive search
# Exhaustive search: tabulate the power achieved at every candidate
# subcohort sampling fraction (reported as the subcohort size n*q).
outfile <- "search.txt"
s_q <- seq(0.01, 0.5, by = 0.01)
cat("n","pD","p1","hr","nq","alpha","power\n",file=outfile,sep="\t")
for (pD in s_pD) {
  for (p1 in s_p1) {
    for (hr in s_hr) {
      for (q in s_q) {
        power <- ccsize(n,q,pD,p1,alpha,log(hr))
        cat(n,"\t",pD,"\t",p1,"\t",hr,"\t",q*n,"\t",alpha,"\t",power,"\n",
            file=outfile,append=TRUE)
      }
    }
  }
}
search <- read.table(outfile, header = TRUE, sep = "\t")
unlink(outfile)
## End(Not run)