############# Rinott #############
Rinotth <- function(k, n0, pstar, conf, rep) {
  # last update 7/13/2015
  # function to return estimate of Rinott h and upper confidence bound on it
  # k = number of systems
  # n0 = first-stage sample size
  # pstar = 1-alpha value (PCS)
  # conf = confidence level on upper bound
  # rep = number of replications to use for estimate
  #
  # set.seed(13)
  Z <- matrix(rnorm((k-1) * rep), ncol = k-1)
  Y <- matrix(rchisq((k-1) * rep, n0 - 1.), ncol = k-1)
  C <- matrix(rchisq(rep, n0 - 1.), ncol = 1.)
  Cmat <- matrix(C, ncol = k-1, nrow = rep, byrow = F)
  denom <- sqrt((n0 - 1.) * (1./Y + 1./Cmat))
  H <- sort(apply(Z * denom, 1., max))
  Hstar <- quantile(H, pstar)
  upper <- ceiling(pstar * rep + qnorm(conf) * sqrt(pstar * (1. - pstar) * rep) + 0.5)
  Hupper <- H[upper]
  list(h = Hstar, UCB = Hupper)
}

Rinott <- function(k, alpha, n0, delta){
  # implements Rinott's procedure
  # k = number of systems
  # 1-alpha = desired PCS
  # n0 = first-stage sample size
  # delta = indifference-zone parameter
  # note: uses 99% UCB for Rinott's h
  h <- Rinotth(k, n0, 1-alpha, 0.99, 10000)$UCB
  Ybar <- NULL
  Vars <- NULL
  Ns <- NULL
  # loop through the k systems
  for (x in 1:k){
    Y <- MySim(x, n0)
    S2 <- var(Y)
    N <- ceiling(h^2*S2/delta^2)
    if (N > n0){
      Y <- c(Y, MySim(x, N-n0))
    }
    Ybar <- c(Ybar, mean(Y))
    Vars <- c(Vars, S2)
    N <- max(N, n0)
    Ns <- c(Ns, N)
  }
  list(Best = which.max(Ybar), Ybar = Ybar, Var = Vars, N = Ns)
}

############# Subset #############
Subset <- function(k, alpha, n){
  # function to do subset selection
  # k = number of systems
  # n = number of replications (equal)
  # 1-alpha = confidence level
  # simulate:
  Yall <- NULL
  for (x in 1:k){
    Yall <- cbind(Yall, MySim(x, n))
  }
  # subset selection
  Ybar <- apply(Yall, 2, mean)
  S2 <- apply(Yall, 2, var)/n
  tval <- qt((1-alpha)^(1/(k-1)), df = n-1)
  Subset <- 1:k
  for (i in 1:k){
    for (j in 1:k){
      if (Ybar[i] < (Ybar[j] - tval*sqrt(S2[i] + S2[j]))){
        Subset[i] <- 0
        break
      }
    }
  }
  list(Subset = Subset[Subset != 0], Ybar = Ybar, S2 = S2)
}

SubsetCRN <- function(k, alpha, n, seed){
  # function to do subset selection with CRN
  # k = number of systems
  # n = number of replications (equal)
  # 1-alpha = confidence level
  # seed = random number seed
  # simulate:
  Yall <- NULL
  for (x in 1:k){
    Yall <- cbind(Yall, MySim(x, n, seed))
  }
  # subset selection
  Ybar <- apply(Yall, 2, mean)
  S2 <- cov(Yall)/n
  tval <- qt(1-alpha/(k-1), df = n-1)
  Subset <- 1:k
  for (i in 1:k){
    for (j in 1:k){
      if (Ybar[i] < (Ybar[j] - tval*sqrt(S2[i,i] + S2[j,j] - 2*S2[i,j]))){
        Subset[i] <- 0
        break
      }
    }
  }
  list(Subset = Subset[Subset != 0], Ybar = Ybar, S2 = S2, corr = cor(Yall))
}

############# KN #############
KN <- function(k, alpha, n0, delta){
  # function to do KN without CRN
  # k = number of systems
  # n0 = first-stage sample size
  # 1-alpha = desired PCS
  # delta = indifference-zone parameter
  II <- 1:k
  Active <- rep(TRUE, k)
  Elim <- rep(0, k)
  # get n0 reps from each system and compute variances
  Yn0 <- matrix(0, nrow=k, ncol=n0)
  for (i in 1:k){
    for (j in 1:n0){
      Yn0[i,j] <- MySim(i)
    }
  }
  S2 <- var(t(Yn0))
  # start sequential
  eta <- 0.5*((2*alpha/(k-1))^(-2/(n0-1)) - 1)
  h2 <- 2*eta*(n0-1)
  Ysum <- apply(Yn0, 1, sum)
  r <- n0
  # main elimination loop
  while(sum(Active) > 1){
    r <- r + 1
    ATemp <- Active
    for(i in II[Active]){
      Ysum[i] <- Ysum[i] + MySim(i)
    }
    for(i in II[Active]){
      for(l in II[Active]){
        S2diff <- S2[i,i] + S2[l,l] - 2*S2[i,l]
        W <- max(0, (delta/2)*(h2*S2diff/delta^2 - r))
        if(Ysum[i] < Ysum[l] - W){
          ATemp[i] <- FALSE
          Elim[i] <- r
          break
        }
      }
    }
    Active <- ATemp
  }
  list(Best = II[Active], n = r, Elim = Elim)
}
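############# Example usage (illustrative sketch) #############
# Every procedure in this file calls a user-supplied simulation oracle
# MySim(), which is not defined here.  The stub and demo calls below are a
# minimal, hypothetical sketch and are NOT part of the original procedures:
# the assumed signature MySim(x, n = 1, seed = NULL), returning n
# observations of system x, is inferred from the calls above, and the
# normal test bed is invented purely for illustration.  The if (FALSE)
# guard keeps the demo from running when this file is sourced.
if (FALSE){
  MySim <- function(x, n = 1, seed = NULL){
    # hypothetical oracle: n i.i.d. N(x/10, 1) observations of system x
    # (bigger mean is better); reusing one seed for every system yields
    # common random numbers, as SubsetCRN() expects
    if (!is.null(seed)) set.seed(seed)
    rnorm(n, mean = x/10, sd = 1)
  }
  Rinott(k = 10, alpha = 0.05, n0 = 20, delta = 0.1)
  Subset(k = 10, alpha = 0.05, n = 50)
  SubsetCRN(k = 10, alpha = 0.05, n = 50, seed = 42)
  KN(k = 10, alpha = 0.05, n0 = 20, delta = 0.1)
}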
############# Paulson #############
eta <- function(alpha, k, n0){
  # eta constant for Paulson's procedure
  ((alpha/(k-1))^(-2/(k*(n0-1))) - 1)
}

Paulson <- function(k, alpha, n0, delta){
  # function to do Paulson with lambda = delta/2
  # k = number of systems
  # n0 = first-stage sample size
  # 1-alpha = desired PCS
  # delta = indifference-zone parameter
  II <- 1:k
  Active <- rep(TRUE, k)
  Elim <- rep(0, k)
  # get n0 reps from each system and compute pooled variance
  Yn0 <- matrix(0, nrow=k, ncol=n0)
  for (i in 1:k){
    for (j in 1:n0){
      Yn0[i,j] <- MySim(i)
    }
  }
  S2 <- mean(apply(Yn0, 1, var))
  # start sequential
  a <- eta(alpha, k, n0)*k*(n0-1)*S2/delta
  Ysum <- apply(Yn0, 1, sum)
  r <- n0
  # main elimination loop
  while(sum(Active) > 1){
    r <- r + 1
    ATemp <- Active
    for(i in II[Active]){
      Ysum[i] <- Ysum[i] + MySim(i)
    }
    for(l in II[Active]){
      if((Ysum[l] - max(Ysum[Active])) < min(0, -a + delta*r/2)){
        ATemp[l] <- FALSE
        Elim[l] <- r
      }
    }
    Active <- ATemp
  }
  list(Best = II[Active], n = r, Elim = Elim)
}

############# NSGS #############
NSGS <- function(k, alpha, n0, delta){
  # implements NSGS
  # k = number of systems
  # 1-alpha = desired PCS
  # n0 = first-stage sample size
  # delta = indifference-zone parameter
  # note: uses 99% UCB for Rinott's h
  h <- Rinotth(k, n0, 1-alpha/2, 0.99, 10000)$UCB
  # first apply subset selection
  Yall <- NULL
  for (x in 1:k){
    Yall <- cbind(Yall, MySim(x, n0))
  }
  Ybar <- apply(Yall, 2, mean)
  S2 <- apply(Yall, 2, var)/n0
  tval <- qt((1-alpha/2)^(1/(k-1)), df = n0-1)
  Subset <- 1:k
  for (i in 1:k){
    for (j in 1:k){
      if (Ybar[i] < (Ybar[j] - tval*sqrt(S2[i] + S2[j]))){
        Subset[i] <- 0
        break
      }
    }
  }
  # next apply Rinott to the survivors
  Survivors <- (1:k)[Subset != 0]
  # loop through the survivors
  Ybars <- NULL
  Ns <- NULL
  if (length(Survivors) > 1){
    for (x in Survivors){
      # n0*S2[x] recovers the raw first-stage sample variance of system x
      # (S2 was divided by n0 above for the screening step)
      N <- ceiling(h^2*n0*S2[x]/delta^2)
      Y <- Yall[,x]
      if (N > n0){
        Y <- c(Y, MySim(x, N-n0))
      }
      Ybars <- c(Ybars, mean(Y))
      N <- max(N, n0)
      Ns <- c(Ns, N)
    }
    Best <- Survivors[which.max(Ybars)]
  } else{
    Best <- Survivors
    Ybars <- Ybar[Survivors]
    Ns <- n0
  }
  list(Best = Best, Survivors = Survivors, Ybar = Ybars, N = Ns)
}

############# bi-PASS #############
# known variance constant for 0.05 is 5.0
bipass <- function(k, c, n0, Nmax){
  # synchronized bi-PASS with pooled variance
  # k = number of systems
  # c = constant needed to guarantee EFER
  # n0 = first-stage sample size
  # Nmax = maximum number of replications
  g <- function(t, calpha=c){sqrt((calpha + log(t+1))*(t+1))}
  II <- 1:k
  Active <- rep(TRUE, k)   # systems not eliminated
  Elim <- rep(0, k)        # rep on which elimination occurs
  # get n0 reps from each system and compute pooled variance
  Yn0 <- matrix(0, nrow=k, ncol=n0)
  for (i in 1:k){
    for (j in 1:n0){
      Yn0[i,j] <- MySim(i)
    }
  }
  S2 <- mean(apply(Yn0, 1, var))
  # start sequential elimination
  Ysum <- apply(Yn0, 1, sum)
  r <- n0
  N <- k*n0
  # main elimination loop
  while(sum(Active) > 1 && N < Nmax){
    r <- r + 1
    N <- N + sum(Active)
    for(i in II[Active]){
      Ysum[i] <- Ysum[i] + MySim(i)
    }
    rmuhat <- sum(Ysum[Active])/sum(Active)
    for(l in II[Active]){
      if(Ysum[l] - rmuhat <= -g(r/S2)*S2){
        Active[l] <- FALSE
        Elim[l] <- r
      }
    }
    # print(Active)
  }
  list(Best = II[Active], n = r, Elim = Elim)
}
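############# Example usage: sequential procedures (illustrative sketch) #############
# A minimal sketch of how Paulson(), NSGS(), and bipass() might be called,
# assuming the hypothetical MySim() test bed from the example block after
# the KN section; the argument values are arbitrary illustrations.  For
# bipass(), c = 5.0 is the known-variance constant for EFER 0.05 quoted in
# the comment above, and Nmax simply caps the total simulation effort.
if (FALSE){
  Paulson(k = 10, alpha = 0.05, n0 = 20, delta = 0.1)
  NSGS(k = 10, alpha = 0.05, n0 = 20, delta = 0.1)
  bipass(k = 10, c = 5.0, n0 = 20, Nmax = 50000)
}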
############# CEI #############
cei <- function(k, n0, Nmax){
  # Ryzhov CEI that simulates the top CEI solution or the sample best on each iteration
  # k = number of systems
  # n0 = reps for initial variance estimate (recommend 2)
  # Nmax = max observations before termination
  f <- function(z){z*pnorm(z) + dnorm(z)}
  Ybar <- rep(0, k)   # vector of sample means
  Sum2 <- rep(0, k)   # vector of sums of squares
  N <- rep(0, k)      # vector of sample sizes
  Y <- rep(0, n0)     # scratch vector for one system's n0 first-stage reps
  CEI <- rep(0, k)
  systems <- 1:k
  xpath <- NULL
  Ypath <- NULL
  # get n0 reps from each system
  for (i in 1:k){
    for (j in 1:n0){
      Y[j] <- MySim(i)
    }
    Ybar[i] <- mean(Y)
    Sum2[i] <- (n0-1)*var(Y)
    N[i] <- n0
  }
  x <- which.max(Ybar)
  # start sequential allocation
  while(sum(N) < Nmax){
    xstar <- which.max(Ybar)   # current sample best
    xpath <- c(xpath, x)
    Ypath <- c(Ypath, Ybar[xstar])
    # Npath <- c(Npath, sum(N))
    # check if sample best has too few reps
    if (2*N[xstar]^2/(Sum2[xstar]/(N[xstar]-1)) < sum(N^2/(Sum2/(N-1)))){
      x <- xstar
    } else{
      # calculate CEIs
      S2 <- Sum2/(N - 1)/N
      for (i in systems){
        v <- sqrt(S2[xstar] + S2[i])
        CEI[i] <- v*f(-abs(Ybar[i] - Ybar[xstar])/v)
        CEI[xstar] <- 0
      }
      x <- which.max(CEI)
    }
    # simulate x and update statistics
    Yx <- MySim(x)
    difference <- Yx - Ybar[x]
    Ybar[x] <- Ybar[x] + difference/(N[x]+1)
    Sum2[x] <- Sum2[x] + difference*(Yx - Ybar[x])
    N[x] <- N[x] + 1
  }
  list(xstar = xstar, xpath = xpath, Ypath = Ypath, N = N)
}

############# Bootstrap R&S #############
bootRS <- function(k, alpha, n0, delta, B, dn){
  # procedure to implement bootstrap R&S
  # k = number of systems
  # n0 = first-stage sample size
  # 1-alpha = desired PCS
  # delta = indifference-zone parameter
  # B = number of bootstrap samples to estimate PGS
  # dn = increment to increase n0
  PGS <- 0
  Yall <- NULL
  for (x in 1:k){
    Yall <- cbind(Yall, MySim(x, n0))
  }
  # increment n0 until bootstrap PGS >= 1 - alpha
  while(TRUE){
    bsum <- 0
    Ybar <- apply(Yall, 2, mean)
    xstar <- which.max(Ybar)
    for (i in 1:B){
      Ybarstar <- apply(apply(Yall, 2, sample, replace=TRUE), 2, mean)
      diffs <- Ybarstar - Ybarstar[xstar] - (Ybar - Ybar[xstar])
      bsum <- bsum + prod(as.numeric(diffs <= delta))
    }
    PGS <- bsum/B
    print(c("N=", n0, "PGS =", PGS))
    if (PGS < 1 - alpha){
      Ytemp <- NULL
      for (x in 1:k){
        Ytemp <- cbind(Ytemp, MySim(x, dn))
      }
      Yall <- rbind(Yall, Ytemp)
      n0 <- n0 + dn
    } else{break}
  }
  list(Best = xstar, PGS = PGS, N = n0)
}

############# End Procedures #############
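############# Example: empirical PCS check (illustrative sketch) #############
# A minimal sketch of one common way to exercise these procedures: run a
# procedure many times on a test bed whose best system is known and count
# how often it selects that system.  It assumes the hypothetical MySim()
# stub sketched after the KN section (system x ~ N(x/10, 1), so system k is
# best); the number of macro-replications and all arguments are arbitrary
# choices for illustration, not prescriptions.
if (FALSE){
  macroreps <- 100
  k <- 10
  wins <- 0
  for (m in 1:macroreps){
    out <- KN(k = k, alpha = 0.05, n0 = 20, delta = 0.1)
    if (out$Best == k) wins <- wins + 1
  }
  wins/macroreps   # empirical PCS estimate
}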