R Under development (unstable) (2025-01-24 r87624) -- "Unsuffered Consequences"
Copyright (C) 2025 The R Foundation for Statistical Computing
Platform: aarch64-apple-darwin24.2.0

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.

> pkgname <- "stats"
> source(file.path(R.home("share"), "R", "examples-header.R"))
> options(warn = 1)
> library('stats')
> 
> base::assign(".oldSearch", base::search(), pos = 'CheckExEnv')
> base::assign(".old_wd", base::getwd(), pos = 'CheckExEnv')
> cleanEx()
> nameEx("AIC")
> ### * AIC
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: AIC
> ### Title: Akaike's An Information Criterion
> ### Aliases: AIC BIC
> ### Keywords: models
> 
> ### ** Examples
> 
> lm1 <- lm(Fertility ~ . , data = swiss)
> AIC(lm1)
[1] 326.0716
> stopifnot(all.equal(AIC(lm1),
+                     AIC(logLik(lm1))))
> BIC(lm1)
[1] 339.0226
> 
> lm2 <- update(lm1, . ~ . -Examination)
> AIC(lm1, lm2)
    df      AIC
lm1  7 326.0716
lm2  6 325.2408
> BIC(lm1, lm2)
    df      BIC
lm1  7 339.0226
lm2  6 336.3417
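> ## Extra sketch (not part of the original example): AIC and BIC come straight
> ## from the log-likelihood; with L the maximized log-likelihood and k the
> ## number of estimated parameters, AIC = -2*log(L) + 2*k and
> ## BIC = -2*log(L) + log(n)*k :
> ll <- logLik(lm1)
> stopifnot(all.equal(AIC(lm1), -2*c(ll) + 2 * attr(ll, "df")),
+           all.equal(BIC(lm1), -2*c(ll) + log(nobs(lm1)) * attr(ll, "df")))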
> 
> 
> 
> cleanEx()
> nameEx("ARMAacf")
> ### * ARMAacf
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ARMAacf
> ### Title: Compute Theoretical ACF for an ARMA Process
> ### Aliases: ARMAacf
> ### Keywords: ts
> 
> ### ** Examples
> 
> ARMAacf(c(1.0, -0.25), 1.0, lag.max = 10)
          0           1           2           3           4           5 
1.000000000 0.875000000 0.625000000 0.406250000 0.250000000 0.148437500 
          6           7           8           9          10 
0.085937500 0.048828125 0.027343750 0.015136719 0.008300781 
> 
> ## Example from Brockwell & Davis (1991, pp.92-4)
> ## answer: 2^(-n) * (32/3 + 8 * n) /(32/3)
> n <- 1:10
> a.n <- 2^(-n) * (32/3 + 8 * n) /(32/3)
> (A.n <- ARMAacf(c(1.0, -0.25), 1.0, lag.max = 10))
          0           1           2           3           4           5 
1.000000000 0.875000000 0.625000000 0.406250000 0.250000000 0.148437500 
          6           7           8           9          10 
0.085937500 0.048828125 0.027343750 0.015136719 0.008300781 
> stopifnot(all.equal(unname(A.n), c(1, a.n)))
> 
> ARMAacf(c(1.0, -0.25), 1.0, lag.max = 10, pacf = TRUE)
 [1]  0.8750000 -0.6000000  0.3750000 -0.2727273  0.2142857 -0.1764706
 [7]  0.1500000 -0.1304348  0.1153846 -0.1034483
> zapsmall(ARMAacf(c(1.0, -0.25), lag.max = 10, pacf = TRUE))
 [1]  0.80 -0.25  0.00  0.00  0.00  0.00  0.00  0.00  0.00  0.00
> 
> ## Cov-Matrix of length-7 sub-sample of AR(1) example:
> toeplitz(ARMAacf(0.8, lag.max = 7))
          [,1]     [,2]    [,3]   [,4]   [,5]    [,6]     [,7]      [,8]
[1,] 1.0000000 0.800000 0.64000 0.5120 0.4096 0.32768 0.262144 0.2097152
[2,] 0.8000000 1.000000 0.80000 0.6400 0.5120 0.40960 0.327680 0.2621440
[3,] 0.6400000 0.800000 1.00000 0.8000 0.6400 0.51200 0.409600 0.3276800
[4,] 0.5120000 0.640000 0.80000 1.0000 0.8000 0.64000 0.512000 0.4096000
[5,] 0.4096000 0.512000 0.64000 0.8000 1.0000 0.80000 0.640000 0.5120000
[6,] 0.3276800 0.409600 0.51200 0.6400 0.8000 1.00000 0.800000 0.6400000
[7,] 0.2621440 0.327680 0.40960 0.5120 0.6400 0.80000 1.000000 0.8000000
[8,] 0.2097152 0.262144 0.32768 0.4096 0.5120 0.64000 0.800000 1.0000000
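> ## Extra sketch: such a Toeplitz ACF matrix is a valid covariance matrix,
> ## i.e., symmetric and positive definite (all eigenvalues > 0) :
> C <- toeplitz(ARMAacf(0.8, lag.max = 7))
> stopifnot(isSymmetric(C), all(eigen(C, only.values = TRUE)$values > 0))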
> 
> 
> 
> cleanEx()
> nameEx("ARMAtoMA")
> ### * ARMAtoMA
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ARMAtoMA
> ### Title: Convert ARMA Process to Infinite MA Process
> ### Aliases: ARMAtoMA
> ### Keywords: ts
> 
> ### ** Examples
> 
> ARMAtoMA(c(1.0, -0.25), 1.0, 10)
 [1] 2.00000000 1.75000000 1.25000000 0.81250000 0.50000000 0.29687500
 [7] 0.17187500 0.09765625 0.05468750 0.03027344
> ## Example from Brockwell & Davis (1991, p.92)
> ## answer (1 + 3*n)*2^(-n)
> n <- 1:10; (1 + 3*n)*2^(-n)
 [1] 2.00000000 1.75000000 1.25000000 0.81250000 0.50000000 0.29687500
 [7] 0.17187500 0.09765625 0.05468750 0.03027344
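> ## Extra sketch linking ARMAtoMA() to ARMAacf(): with psi-weights
> ## psi_0 = 1, psi_1, ..., the ACF is rho(k) = sum_j psi_j psi_{j+k} / sum_j psi_j^2 ;
> ## the weights decay geometrically, so truncating at 200 terms suffices :
> psi <- c(1, ARMAtoMA(c(1.0, -0.25), 1.0, 200))
> g <- sapply(0:10, function(k) { n <- length(psi) - k
+             sum(psi[seq_len(n)] * psi[k + seq_len(n)]) })
> stopifnot(all.equal(unname(g/g[1]),
+                     unname(ARMAacf(c(1.0, -0.25), 1.0, lag.max = 10)),
+                     tolerance = 1e-10))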
> 
> 
> 
> cleanEx()
> nameEx("Beta")
> ### * Beta
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Beta
> ### Title: The Beta Distribution
> ### Aliases: Beta dbeta pbeta qbeta rbeta
> ### Keywords: distribution
> 
> ### ** Examples
> 
> x <- seq(0, 1, length.out = 21)
> dbeta(x, 1, 1)
 [1] 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
> pbeta(x, 1, 1)
 [1] 0.00 0.05 0.10 0.15 0.20 0.25 0.30 0.35 0.40 0.45 0.50 0.55 0.60 0.65 0.70
[16] 0.75 0.80 0.85 0.90 0.95 1.00
> 
> ## Visualization, including limit cases:
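> ## (NB: the 'asp' and 'ylim' defaults below refer to 'isLim', which is only
> ##  assigned inside the body; R's lazy evaluation of default arguments makes
> ##  this work.)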
> pl.beta <- function(a,b, asp = if(isLim) 1, ylim = if(isLim) c(0,1.1)) {
+   if(isLim <- a == 0 || b == 0 || a == Inf || b == Inf) {
+     eps <- 1e-10
+     x <- c(0, eps, (1:7)/16, 1/2+c(-eps,0,eps), (9:15)/16, 1-eps, 1)
+   } else {
+     x <- seq(0, 1, length.out = 1025)
+   }
+   fx <- cbind(dbeta(x, a,b), pbeta(x, a,b), qbeta(x, a,b))
+   f <- fx; f[fx == Inf] <- 1e100
+   matplot(x, f, ylab="", type="l", ylim=ylim, asp=asp,
+           main = sprintf("[dpq]beta(x, a=%g, b=%g)", a,b))
+   abline(0,1,     col="gray", lty=3)
+   abline(h = 0:1, col="gray", lty=3)
+   legend("top", paste0(c("d","p","q"), "beta(x, a,b)"),
+          col=1:3, lty=1:3, bty = "n")
+   invisible(cbind(x, fx))
+ }
> pl.beta(3,1)
> 
> pl.beta(2, 4)
> pl.beta(3, 7)
> pl.beta(3, 7, asp=1)
> 
> pl.beta(0, 0)   ## point masses at  {0, 1}
> 
> pl.beta(0, 2)   ## point mass at 0 ; the same as
> pl.beta(1, Inf)
> 
> pl.beta(Inf, 2) ## point mass at 1 ; the same as
> pl.beta(3, 0)
> 
> pl.beta(Inf, Inf)# point mass at 1/2
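> ## Extra sketch: pbeta() and qbeta() are inverses of each other, here even
> ## at the boundary points 0 and 1 :
> stopifnot(all.equal(qbeta(pbeta(x, 2.5, pi), 2.5, pi), x))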
> 
> 
> 
> cleanEx()
> nameEx("Binomial")
> ### * Binomial
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Binomial
> ### Title: The Binomial Distribution
> ### Aliases: Binomial dbinom pbinom qbinom rbinom
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> # Compute P(45 < X < 55) for X Binomial(100,0.5)
> sum(dbinom(46:54, 100, 0.5))
[1] 0.6317984
> 
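> ## Extra sketch: the same probability via the distribution function :
> stopifnot(all.equal(sum(dbinom(46:54, 100, 0.5)),
+                     pbinom(54, 100, 0.5) - pbinom(45, 100, 0.5)))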
> ## Using "log = TRUE" for an extended range :
> n <- 2000
> k <- seq(0, n, by = 20)
> plot (k, dbinom(k, n, pi/10, log = TRUE), type = "l", ylab = "log density",
+       main = "dbinom(*, log=TRUE) is better than  log(dbinom(*))")
> lines(k, log(dbinom(k, n, pi/10)), col = "red", lwd = 2)
> ## extreme points are omitted since dbinom gives 0.
> mtext("dbinom(k, log=TRUE)", adj = 0)
> mtext("extended range", adj = 0, line = -1, font = 4)
> mtext("log(dbinom(k))", col = "red", adj = 1)
> 
> 
> 
> cleanEx()
> nameEx("Cauchy")
> ### * Cauchy
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Cauchy
> ### Title: The Cauchy Distribution
> ### Aliases: Cauchy dcauchy pcauchy qcauchy rcauchy
> ### Keywords: distribution
> 
> ### ** Examples
> 
> dcauchy(-1:4)
[1] 0.15915494 0.31830989 0.15915494 0.06366198 0.03183099 0.01872411
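> ## Extra sketch: the standard Cauchy density in closed form is 1/(pi*(1 + x^2)) :
> x <- -1:4
> stopifnot(all.equal(dcauchy(x), 1/(pi*(1 + x^2))))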
> 
> 
> 
> cleanEx()
> nameEx("Chisquare")
> ### * Chisquare
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Chisquare
> ### Title: The (non-central) Chi-Squared Distribution
> ### Aliases: Chisquare dchisq pchisq qchisq rchisq
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
> dchisq(1, df = 1:3)
[1] 0.2419707 0.3032653 0.2419707
> pchisq(1, df =  3)
[1] 0.198748
> pchisq(1, df =  3, ncp = 0:4)  # includes the above
[1] 0.19874804 0.13229855 0.08787311 0.05824691 0.03853592
> 
> x <- 1:10
> ## Chi-squared(df = 2) is the exponential distribution with rate 1/2 :
> all.equal(dchisq(x, df = 2), dexp(x, 1/2))
[1] TRUE
> all.equal(pchisq(x, df = 2), pexp(x, 1/2))
[1] TRUE
> 
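> ## Extra sketch: df = 1 is a squared standard normal :
> stopifnot(all.equal(pchisq(x, df = 1), 2*pnorm(sqrt(x)) - 1))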
> ## non-central RNG -- df = 0 with ncp > 0:  Z0 has point mass at 0!
> Z0 <- rchisq(100, df = 0, ncp = 2.)
> graphics::stem(Z0)

  The decimal point is at the |

  0 | 0000000000000000000000000000000000000013356778899
  1 | 0001333456678888899
  2 | 0011444467
  3 | 00233345888
  4 | 111246
  5 | 
  6 | 
  7 | 178
  8 | 23

> 
> 
> ## "analytical" test
> lam <- seq(0, 100, by = .25)
> p00 <- pchisq(0,      df = 0, ncp = lam)
> p.0 <- pchisq(1e-300, df = 0, ncp = lam)
> stopifnot(all.equal(p00, exp(-lam/2)),
+           all.equal(p.0, exp(-lam/2)))
> 
> 
> 
> cleanEx()
> nameEx("Exponential")
> ### * Exponential
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Exponential
> ### Title: The Exponential Distribution
> ### Aliases: Exponential dexp pexp qexp rexp
> ### Keywords: distribution
> 
> ### ** Examples
> 
> dexp(1) - exp(-1) #-> 0
[1] 0
> 
> ## a fast way to generate *sorted*  U[0,1]  random numbers:
> rsunif <- function(n) {
+   n1 <- n + 1
+   cE <- cumsum(rexp(n1))
+   cE[seq_len(n)] / cE[n1]
+ }
> plot(rsunif(1000), ylim=0:1, pch=".")
> abline(0,1/(1000+1), col=adjustcolor(1, 0.5))
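> ## (rsunif() works because cumulative sums of i.i.d. exponentials, scaled by
> ##  their total, are jointly distributed as uniform order statistics.)
> ## Extra sketch of the closed forms, pexp(q) = 1 - exp(-q), qexp(p) = -log(1-p)/rate :
> p <- (1:9)/10
> stopifnot(all.equal(pexp(p), 1 - exp(-p)),
+           all.equal(qexp(p, rate = 2), -log(1 - p)/2))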
> 
> 
> 
> cleanEx()
> nameEx("Fdist")
> ### * Fdist
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: FDist
> ### Title: The F Distribution
> ### Aliases: FDist df pf qf rf
> ### Keywords: distribution
> 
> ### ** Examples
> 
> ## Equivalence of pt(.,nu) with pf(.^2, 1,nu):
> x <- seq(0.001, 5, length.out = 100)
> nu <- 4
> stopifnot(all.equal(2*pt(x,nu) - 1, pf(x^2, 1,nu)),
+           ## upper tails:
+           all.equal(2*pt(x,     nu, lower.tail=FALSE),
+                       pf(x^2, 1,nu, lower.tail=FALSE)))
> 
> ## the density of the square of a t_m is 2*dt(x, m)/(2*x)
> # check this is the same as the density of F_{1,m}
> all.equal(df(x^2, 1, 5), dt(x, 5)/x)
[1] TRUE
> 
> ## Identity (F <-> t):  qf(2*p - 1, 1, df) == qt(p, df)^2  for  p >= 1/2
> p <- seq(1/2, .99, length.out = 50); df <- 10
> rel.err <- function(x, y) ifelse(x == y, 0, abs(x-y)/mean(abs(c(x,y))))
> stopifnot(all.equal(qf(2*p - 1, df1 = 1, df2 = df),
+                     qt(p, df)^2))
> 
> ## Identity (F <-> Beta <-> incompl.beta):
> n1 <- 7 ; n2 <- 12; qF <- c((0:4)/4, 1.5, 2:16)
> x <- n2/(n2 + n1*qF)
> stopifnot(all.equal(pf(qF, n1, n2, lower.tail=FALSE),
+                     pbeta(x, n2/2, n1/2)))
> 
> 
> 
> cleanEx()
> nameEx("GammaDist")
> ### * GammaDist
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: GammaDist
> ### Title: The Gamma Distribution
> ### Aliases: GammaDist dgamma pgamma qgamma rgamma
> ### Keywords: distribution
> 
> ### ** Examples
> 
> -log(dgamma(1:4, shape = 1))
[1] 1 2 3 4
> p <- (1:9)/10
> pgamma(qgamma(p, shape = 2), shape = 2)
[1] 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9
> 1 - 1/exp(qgamma(p, shape = 1))
[1] 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9
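> ## Extra sketch: the Gamma CDF is tied to the Poisson CDF via
> ## ppois(k, lambda) == pgamma(lambda, k + 1, lower.tail = FALSE) :
> k <- 0:10; lambda <- 2.5
> stopifnot(all.equal(ppois(k, lambda),
+                     pgamma(lambda, k + 1, lower.tail = FALSE)))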
> 
> 
> 
> cleanEx()
> nameEx("Geometric")
> ### * Geometric
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Geometric
> ### Title: The Geometric Distribution
> ### Aliases: Geometric dgeom pgeom qgeom rgeom
> ### Keywords: distribution
> 
> ### ** Examples
> 
> qgeom((1:9)/10, prob = .2)
[1]  0  0  1  2  3  4  5  7 10
> Ni <- rgeom(20, prob = 1/4); table(factor(Ni, 0:max(Ni)))

 0  1  2  3  4  5  6  7  8  9 10 11 
 5  3  3  1  2  2  0  1  0  1  1  1 
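> ## Extra sketch: closed-form CDF (x = number of failures before the first
> ## success), pgeom(x, p) = 1 - (1-p)^(x+1) :
> x <- 0:10
> stopifnot(all.equal(pgeom(x, 1/4), 1 - (3/4)^(x+1)))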
> 
> 
> 
> cleanEx()
> nameEx("HoltWinters")
> ### * HoltWinters
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: HoltWinters
> ### Title: Holt-Winters Filtering
> ### Aliases: HoltWinters print.HoltWinters residuals.HoltWinters
> ### Keywords: ts
> 
> ### ** Examples
> 
> ## Don't show: 
> od <- options(digits = 5)
> ## End(Don't show)
> require(graphics)
> 
> ## Seasonal Holt-Winters
> (m <- HoltWinters(co2))
Holt-Winters exponential smoothing with trend and additive seasonal component.

Call:
HoltWinters(x = co2)

Smoothing parameters:
 alpha: 0.51265
 beta : 0.0094977
 gamma: 0.47289

Coefficients:
         [,1]
a   364.76162
b     0.12474
s1    0.22153
s2    0.95528
s3    1.59847
s4    2.87580
s5    3.28201
s6    2.44070
s7    0.89694
s8   -1.37964
s9   -3.41124
s10  -3.25702
s11  -1.91349
s12  -0.58442
> plot(m)
> plot(fitted(m))
> 
> (m <- HoltWinters(AirPassengers, seasonal = "mult"))
Holt-Winters exponential smoothing with trend and multiplicative seasonal component.

Call:
HoltWinters(x = AirPassengers, seasonal = "mult")

Smoothing parameters:
 alpha: 0.27559
 beta : 0.032693
 gamma: 0.87073

Coefficients:
         [,1]
a   469.32322
b     3.02154
s1    0.94646
s2    0.88292
s3    0.97174
s4    1.03048
s5    1.04769
s6    1.18053
s7    1.35908
s8    1.33317
s9    1.10834
s10   0.98688
s11   0.83613
s12   0.92099
> plot(m)
> 
> ## Non-Seasonal Holt-Winters
> x <- uspop + rnorm(uspop, sd = 5)
> m <- HoltWinters(x, gamma = FALSE)
> plot(m)
> 
> ## Exponential Smoothing
> m2 <- HoltWinters(x, gamma = FALSE, beta = FALSE)
> lines(fitted(m2)[,1], col = 3)
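> ## Extra sketch (silent): forecasts, optionally with prediction intervals,
> ## come from predict.HoltWinters() :
> fc <- predict(m2, 10, prediction.interval = TRUE)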
> ## Don't show: 
> options(od)
> ## End(Don't show)
> 
> 
> 
> cleanEx()
> nameEx("Hypergeometric")
> ### * Hypergeometric
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Hypergeometric
> ### Title: The Hypergeometric Distribution
> ### Aliases: Hypergeometric dhyper phyper qhyper rhyper
> ### Keywords: distribution
> 
> ### ** Examples
> 
> m <- 10; n <- 7; k <- 8
> x <- 0:(k+1)
> rbind(phyper(x, m, n, k), dhyper(x, m, n, k))
     [,1]         [,2]       [,3]     [,4]      [,5]      [,6]      [,7]
[1,]    0 0.0004113534 0.01336898 0.117030 0.4193747 0.7821884 0.9635952
[2,]    0 0.0004113534 0.01295763 0.103661 0.3023447 0.3628137 0.1814068
           [,8]       [,9] [,10]
[1,] 0.99814891 1.00000000     1
[2,] 0.03455368 0.00185109     0
> all(phyper(x, m, n, k) == cumsum(dhyper(x, m, n, k)))  # FALSE
[1] FALSE
> stopifnot(abs(phyper(x, m, n, k) - cumsum(dhyper(x, m, n, k))) < 5e-16)
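> ## Extra sketch: symmetry in the roles of the white and black balls,
> ## dhyper(x, m, n, k) == dhyper(k - x, n, m, k) :
> stopifnot(all.equal(dhyper(x, m, n, k), dhyper(k - x, n, m, k)))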
> 
> 
> 
> cleanEx()
> nameEx("IQR")
> ### * IQR
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: IQR
> ### Title: The Interquartile Range
> ### Aliases: IQR
> ### Keywords: univar robust distribution
> 
> ### ** Examples
> 
> IQR(rivers)
[1] 370
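> ## Extra sketch: IQR() is just the difference of the 25% and 75% quantiles :
> stopifnot(all.equal(IQR(rivers),
+                     unname(diff(quantile(rivers, c(0.25, 0.75))))))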
> 
> 
> 
> cleanEx()
> nameEx("KalmanLike")
> ### * KalmanLike
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: KalmanLike
> ### Title: Kalman Filtering
> ### Aliases: KalmanLike KalmanRun KalmanSmooth KalmanForecast makeARIMA
> ### Keywords: ts
> 
> ### ** Examples
> 
> ## an ARIMA fit
> fit3 <- arima(presidents, c(3, 0, 0))
> predict(fit3, 12)
$pred
         Qtr1     Qtr2     Qtr3     Qtr4
1975 29.84194 34.41014 39.30815 43.02779
1976 46.18808 48.56947 50.44866 51.86064
1977 52.94295 53.75521 54.37019 54.83150

$se
         Qtr1     Qtr2     Qtr3     Qtr4
1975  9.00655 11.25606 13.43389 14.51516
1976 15.25538 15.65611 15.90158 16.03792
1977 16.11764 16.16229 16.18785 16.20220

> ## reconstruct this
> pr <- KalmanForecast(12, fit3$model)
> pr$pred + fit3$coef[4]
 [1] 29.84194 34.41014 39.30815 43.02779 46.18808 48.56947 50.44866 51.86064
 [9] 52.94295 53.75521 54.37019 54.83150
> sqrt(pr$var * fit3$sigma2)
 [1]  9.00655 11.25606 13.43389 14.51516 15.25538 15.65611 15.90158 16.03792
 [9] 16.11764 16.16229 16.18785 16.20220
> ## and now do it year by year
> mod <- fit3$model
> for(y in 1:3) {
+   pr <- KalmanForecast(4, mod, TRUE)
+   print(list(pred = pr$pred + fit3$coef["intercept"], 
+              se = sqrt(pr$var * fit3$sigma2)))
+   mod <- attr(pr, "mod")
+ }
$pred
[1] 29.84194 34.41014 39.30815 43.02779

$se
[1]  9.00655 11.25606 13.43389 14.51516

$pred
[1] 46.18808 48.56947 50.44866 51.86064

$se
[1] 15.25538 15.65611 15.90158 16.03792

$pred
[1] 52.94295 53.75521 54.37019 54.83150

$se
[1] 16.11764 16.16229 16.18785 16.20220

> 
> 
> 
> cleanEx()
> nameEx("Logistic")
> ### * Logistic
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Logistic
> ### Title: The Logistic Distribution
> ### Aliases: Logistic dlogis plogis qlogis rlogis
> ### Keywords: distribution
> 
> ### ** Examples
> 
> var(rlogis(4000, 0, scale = 5))  # approximates pi^2/3 * 5^2, up to sampling error
[1] 86.93007
> pi^2/3 * 5^2
[1] 82.2467
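> ## Extra sketch: plogis() is the inverse logit, in closed form :
> x <- seq(-5, 5, by = 0.5)
> stopifnot(all.equal(plogis(x), 1/(1 + exp(-x))),
+           all.equal(qlogis(plogis(x)), x))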
> 
> 
> 
> cleanEx()
> nameEx("Lognormal")
> ### * Lognormal
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Lognormal
> ### Title: The Log Normal Distribution
> ### Aliases: Lognormal dlnorm plnorm qlnorm rlnorm
> ### Keywords: distribution
> 
> ### ** Examples
> 
> dlnorm(1) == dnorm(0)
[1] TRUE
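> ## Extra sketch: by definition, for x > 0,
> ## plnorm(x) == pnorm(log(x))  and  dlnorm(x) == dnorm(log(x))/x :
> x <- c(1/8, 1/2, 1, 2, 8)
> stopifnot(all.equal(plnorm(x), pnorm(log(x))),
+           all.equal(dlnorm(x), dnorm(log(x))/x))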
> 
> 
> 
> cleanEx()
> nameEx("Multinom")
> ### * Multinom
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Multinom
> ### Title: The Multinomial Distribution
> ### Aliases: Multinomial rmultinom dmultinom
> ### Keywords: distribution
> 
> ### ** Examples
> 
> rmultinom(10, size = 12, prob = c(0.1,0.2,0.8))
     [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
[1,]    0    1    0    3    1    0    1    2    2     1
[2,]    2    4    4    2    0    1    2    2    5     3
[3,]   10    7    8    7   11   11    9    8    5     8
> 
> pr <- c(1,3,6,10) # normalization not necessary for generation
> rmultinom(10, 20, prob = pr)
     [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
[1,]    3    0    0    0    1    1    1    1    1     1
[2,]    1    2    3    3    2    4    3    4    4     4
[3,]    7    6    9    7    8    3    8    6    2     7
[4,]    9   12    8   10    9   12    8    9   13     8
> 
> ## all possible outcomes of Multinom(N = 3, K = 3)
> X <- t(as.matrix(expand.grid(0:3, 0:3))); X <- X[, colSums(X) <= 3]
> X <- rbind(X, 3 - colSums(X)); dimnames(X) <- list(letters[1:3], NULL)
> X
  [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
a    0    1    2    3    0    1    2    0    1     0
b    0    0    0    0    1    1    1    2    2     3
c    3    2    1    0    2    1    0    1    0     0
> round(apply(X, 2, function(x) dmultinom(x, prob = c(1,2,5))), 3)
 [1] 0.244 0.146 0.029 0.002 0.293 0.117 0.012 0.117 0.023 0.016
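> ## Extra sketch: the 10 columns of X exhaust all outcomes of size 3,
> ## so the probabilities sum to one :
> stopifnot(all.equal(sum(apply(X, 2, dmultinom, prob = c(1,2,5))), 1))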
> 
> 
> 
> cleanEx()
> nameEx("NLSstAsymptotic")
> ### * NLSstAsymptotic
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: NLSstAsymptotic
> ### Title: Fit the Asymptotic Regression Model
> ### Aliases: NLSstAsymptotic NLSstAsymptotic.sortedXyData
> ### Keywords: manip
> 
> ### ** Examples
> 
> Lob.329 <- Loblolly[ Loblolly$Seed == "329", ]
> print(NLSstAsymptotic(sortedXyData(expression(age),
+                                    expression(height),
+                                    Lob.329)), digits = 3)
    b0     b1    lrc 
 -8.25 102.38  -3.22 
> 
> 
> 
> cleanEx()
> nameEx("NLSstClosestX")
> ### * NLSstClosestX
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: NLSstClosestX
> ### Title: Inverse Interpolation
> ### Aliases: NLSstClosestX NLSstClosestX.sortedXyData
> ### Keywords: manip
> 
> ### ** Examples
> 
> DNase.2 <- DNase[ DNase$Run == "2", ]
> DN.srt <- sortedXyData(expression(log(conc)), expression(density), DNase.2)
> NLSstClosestX(DN.srt, 1.0)
[1] 0.9795406
> 
> 
> 
> cleanEx()
> nameEx("NLSstLfAsymptote")
> ### * NLSstLfAsymptote
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: NLSstLfAsymptote
> ### Title: Horizontal Asymptote on the Left Side
> ### Aliases: NLSstLfAsymptote NLSstLfAsymptote.sortedXyData
> ### Keywords: manip
> 
> ### ** Examples
> 
> DNase.2 <- DNase[ DNase$Run == "2", ]
> DN.srt <- sortedXyData( expression(log(conc)), expression(density), DNase.2 )
> NLSstLfAsymptote( DN.srt )
[1] -0.1869375
> 
> 
> 
> cleanEx()
> nameEx("NLSstRtAsymptote")
> ### * NLSstRtAsymptote
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: NLSstRtAsymptote
> ### Title: Horizontal Asymptote on the Right Side
> ### Aliases: NLSstRtAsymptote NLSstRtAsymptote.sortedXyData
> ### Keywords: manip
> 
> ### ** Examples
> 
> DNase.2 <- DNase[ DNase$Run == "2", ]
> DN.srt <- sortedXyData( expression(log(conc)), expression(density), DNase.2 )
> NLSstRtAsymptote( DN.srt )
[1] 2.157437
> 
> 
> 
> cleanEx()
> nameEx("NegBinomial")
> ### * NegBinomial
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: NegBinomial
> ### Title: The Negative Binomial Distribution
> ### Aliases: NegBinomial dnbinom pnbinom qnbinom rnbinom
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> x <- 0:11
> dnbinom(x, size = 1, prob = 1/2) * 2^(1 + x) # == 1
 [1] 1 1 1 1 1 1 1 1 1 1 1 1
> 126 /  dnbinom(0:8, size  = 2, prob  = 1/2) #- = 126 * 2^(x+2)/(x+1); integer except at x = 4
[1]   504.0   504.0   672.0  1008.0  1612.8  2688.0  4608.0  8064.0 14336.0
> 
> 
> x <- 0:15
> size <- (1:20)/4
> persp(x, size, dnb <- outer(x, size, function(x,s) dnbinom(x, s, prob = 0.4)),
+       xlab = "x", ylab = "s", zlab = "density", theta = 150)
> title(tit <- "negative binomial density(x,s, pr = 0.4)  vs.  x & s")
> 
> image  (x, size, log10(dnb), main = paste("log [", tit, "]"))
> contour(x, size, log10(dnb), add = TRUE)
> 
> ## Alternative parametrization
> x1 <- rnbinom(500, mu = 4, size = 1)
> x2 <- rnbinom(500, mu = 4, size = 10)
> x3 <- rnbinom(500, mu = 4, size = 100)
> h1 <- hist(x1, breaks = 20, plot = FALSE)
> h2 <- hist(x2, breaks = h1$breaks, plot = FALSE)
> h3 <- hist(x3, breaks = h1$breaks, plot = FALSE)
> barplot(rbind(h1$counts, h2$counts, h3$counts),
+         beside = TRUE, col = c("red","blue","cyan"),
+         names.arg = round(h1$breaks[-length(h1$breaks)]))
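> ## Extra sketch: the 'mu' parametrization corresponds to prob = size/(size + mu) :
> stopifnot(all.equal(dnbinom(0:10, size = 1, mu = 4),
+                     dnbinom(0:10, size = 1, prob = 1/(1 + 4))))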
> 
> 
> 
> cleanEx()
> nameEx("Normal")
> ### * Normal
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Normal
> ### Title: The Normal Distribution
> ### Aliases: Normal dnorm pnorm qnorm rnorm
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
> dnorm(0) == 1/sqrt(2*pi)
[1] TRUE
> dnorm(1) == exp(-1/2)/sqrt(2*pi)
[1] TRUE
> dnorm(1) == 1/sqrt(2*pi*exp(1))
[1] TRUE
> 
> ## Using "log = TRUE" for an extended range :
> par(mfrow = c(2,1))
> plot(function(x) dnorm(x, log = TRUE), -60, 50,
+      main = "log { Normal density }")
> curve(log(dnorm(x)), add = TRUE, col = "red", lwd = 2)
> mtext("dnorm(x, log=TRUE)", adj = 0)
> mtext("log(dnorm(x))", col = "red", adj = 1)
> 
> plot(function(x) pnorm(x, log.p = TRUE), -50, 10,
+      main = "log { Normal Cumulative }")
> curve(log(pnorm(x)), add = TRUE, col = "red", lwd = 2)
> mtext("pnorm(x, log=TRUE)", adj = 0)
> mtext("log(pnorm(x))", col = "red", adj = 1)
> 
> ## if you want the so-called 'error function'
> erf <- function(x) 2 * pnorm(x * sqrt(2)) - 1
> ## (see Abramowitz and Stegun 29.2.29)
> ## and the so-called 'complementary error function'
> erfc <- function(x) 2 * pnorm(x * sqrt(2), lower = FALSE)
> ## and the inverses
> erfinv <- function (x) qnorm((1 + x)/2)/sqrt(2)
> erfcinv <- function (x) qnorm(x/2, lower = FALSE)/sqrt(2)
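> ## Extra sketch: quick consistency checks of the four helpers :
> x <- seq(-2, 2, by = 0.25)
> stopifnot(all.equal(erfc(x), 1 - erf(x)),
+           all.equal(erfinv(erf(x)), x),
+           all.equal(erfcinv(erfc(x)), x))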
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("Poisson")
> ### * Poisson
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Poisson
> ### Title: The Poisson Distribution
> ### Aliases: Poisson dpois ppois qpois rpois
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
> -log(dpois(0:7, lambda = 1) * gamma(1+ 0:7)) # == 1
[1] 1 1 1 1 1 1 1 1
> Ni <- rpois(50, lambda = 4); table(factor(Ni, 0:max(Ni)))

 0  1  2  3  4  5  6  7  8  9 10 
 1  2  7  9  8 13  5  4  0  0  1 
> 
> 1 - ppois(10*(15:25), lambda = 100)  # becomes 0 (cancellation)
 [1] 1.233094e-06 1.261664e-08 7.085799e-11 2.252643e-13 4.440892e-16
 [6] 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00 0.000000e+00
[11] 0.000000e+00
>     ppois(10*(15:25), lambda = 100, lower.tail = FALSE)  # no cancellation
 [1] 1.233094e-06 1.261664e-08 7.085800e-11 2.253110e-13 4.174239e-16
 [6] 4.626179e-19 3.142097e-22 1.337219e-25 3.639328e-29 6.453883e-33
[11] 7.587807e-37
> 
> par(mfrow = c(2, 1))
> x <- seq(-0.01, 5, 0.01)
> plot(x, ppois(x, 1), type = "s", ylab = "F(x)", main = "Poisson(1) CDF")
> plot(x, pbinom(x, 100, 0.01), type = "s", ylab = "F(x)",
+      main = "Binomial(100, 0.01) CDF")
> 
> ## The (limit) case  lambda = 0 :
> stopifnot(identical(dpois(0,0), 1),
+           identical(ppois(0,0), 1),
+           identical(qpois(1,0), 0))
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSD")
> ### * SSD
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSD
> ### Title: SSD Matrix and Estimated Variance Matrix in Multivariate Models
> ### Aliases: SSD estVar
> ### Keywords: models multivariate
> 
> ### ** Examples
> 
> # Lifted from Baron+Li:
> # "Notes on the use of R for psychology experiments and questionnaires"
> # Maxwell and Delaney, p. 497
> reacttime <- matrix(c(
+ 420, 420, 480, 480, 600, 780,
+ 420, 480, 480, 360, 480, 600,
+ 480, 480, 540, 660, 780, 780,
+ 420, 540, 540, 480, 780, 900,
+ 540, 660, 540, 480, 660, 720,
+ 360, 420, 360, 360, 480, 540,
+ 480, 480, 600, 540, 720, 840,
+ 480, 600, 660, 540, 720, 900,
+ 540, 600, 540, 480, 720, 780,
+ 480, 420, 540, 540, 660, 780),
+ ncol = 6, byrow = TRUE,
+ dimnames = list(subj = 1:10,
+               cond = c("deg0NA", "deg4NA", "deg8NA",
+                        "deg0NP", "deg4NP", "deg8NP")))
> 
> mlmfit <- lm(reacttime ~ 1)
> SSD(mlmfit)
$SSD
        cond
cond     deg0NA deg4NA deg8NA deg0NP deg4NP deg8NP
  deg0NA  29160  30600  26640  23760  32400  25560
  deg4NA  30600  66600  32400   7200  36000  30600
  deg8NA  26640  32400  56160  41040  57600  69840
  deg0NP  23760   7200  41040  70560  72000  63360
  deg4NP  32400  36000  57600  72000 108000 100800
  deg8NP  25560  30600  69840  63360 100800 122760

$call
lm(formula = reacttime ~ 1)

$df
[1] 9

attr(,"class")
[1] "SSD"
> estVar(mlmfit)
        cond
cond     deg0NA deg4NA deg8NA deg0NP deg4NP deg8NP
  deg0NA   3240   3400   2960   2640   3600   2840
  deg4NA   3400   7400   3600    800   4000   3400
  deg8NA   2960   3600   6240   4560   6400   7760
  deg0NP   2640    800   4560   7840   8000   7040
  deg4NP   3600   4000   6400   8000  12000  11200
  deg8NP   2840   3400   7760   7040  11200  13640
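> ## Extra sketch: estVar() is just the SSD matrix divided by its degrees of freedom :
> s <- SSD(mlmfit)
> stopifnot(all.equal(estVar(mlmfit), s$SSD / s$df))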
> 
> 
> 
> cleanEx()
> nameEx("SSasymp")
> ### * SSasymp
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSasymp
> ### Title: Self-Starting 'nls' Asymptotic Model
> ### Aliases: SSasymp
> ### Keywords: models
> 
> ### ** Examples
> 
> ## Don't show: 
> options(show.nls.convergence=FALSE)
> ## End(Don't show)
> Lob.329 <- Loblolly[ Loblolly$Seed == "329", ]
> SSasymp( Lob.329$age, 100, -8.5, -3.2 )   # response only
[1]  3.988924 11.505611 27.822517 41.130854 51.985354 60.838463
> local({
+   Asym <- 100 ; resp0 <- -8.5 ; lrc <- -3.2
+   SSasymp( Lob.329$age, Asym, resp0, lrc) # response _and_ gradient
+ })
[1]  3.988924 11.505611 27.822517 41.130854 51.985354 60.838463
attr(,"gradient")
          Asym     resp0      lrc
[1,] 0.1151053 0.8848947 11.74087
[2,] 0.1843835 0.8156165 18.03613
[3,] 0.3347697 0.6652303 29.42113
[4,] 0.4574272 0.5425728 35.99454
[5,] 0.5574687 0.4425313 39.14366
[6,] 0.6390642 0.3609358 39.90776
> getInitial(height ~ SSasymp( age, Asym, resp0, lrc), data = Lob.329)
     Asym     resp0       lrc 
94.128204 -8.250753 -3.217578 
> ## Initial values are in fact the converged values
> fm1 <- nls(height ~ SSasymp( age, Asym, resp0, lrc), data = Lob.329)
> summary(fm1)

Formula: height ~ SSasymp(age, Asym, resp0, lrc)

Parameters:
      Estimate Std. Error t value Pr(>|t|)    
Asym   94.1282     8.4030  11.202 0.001525 ** 
resp0  -8.2508     1.2261  -6.729 0.006700 ** 
lrc    -3.2176     0.1386 -23.218 0.000175 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.7493 on 3 degrees of freedom

> 
> ## Visualize the SSasymp()  model  parametrization :
> 
>   xx <- seq(-.3, 5, length.out = 101)
>   ##  Asym + (R0-Asym) * exp(-exp(lrc)* x) :
>   yy <- 5 - 4 * exp(-xx / exp(3/4))
>   stopifnot( all.equal(yy, SSasymp(xx, Asym = 5, R0 = 1, lrc = -3/4)) )
>   require(graphics)
>   op <- par(mar = c(0, .2, 4.1, 0))
>   plot(xx, yy, type = "l", axes = FALSE, ylim = c(0,5.2), xlim = c(-.3, 5),
+        xlab = "", ylab = "", lwd = 2,
+        main = quote("Parameters in the SSasymp model " ~
+                     {f[phi](x) == phi[1] + (phi[2]-phi[1])*~e^{-e^{phi[3]}*~x}}))
>   mtext(quote(list(phi[1] == "Asym", phi[2] == "R0", phi[3] == "lrc")))
>   usr <- par("usr")
>   arrows(usr[1], 0, usr[2], 0, length = 0.1, angle = 25)
>   arrows(0, usr[3], 0, usr[4], length = 0.1, angle = 25)
>   text(usr[2] - 0.2, 0.1, "x", adj = c(1, 0))
>   text(     -0.1, usr[4], "y", adj = c(1, 1))
>   abline(h = 5, lty = 3)
>   arrows(c(0.35, 0.65), 1,
+          c(0  ,  1   ), 1, length = 0.08, angle = 25); text(0.5, 1, quote(1))
>   y0 <- 1 + 4*exp(-3/4) ; t.5 <- log(2) / exp(-3/4) ; AR2 <- 3 # (Asym + R0)/2
>   segments(c(1, 1), c( 1, y0),
+            c(1, 0), c(y0,  1),  lty = 2, lwd = 0.75)
>   text(1.1, 1/2+y0/2, quote((phi[1]-phi[2])*e^phi[3]), adj = c(0,.5))
>   axis(2, at = c(1,AR2,5), labels= expression(phi[2], frac(phi[1]+phi[2],2), phi[1]),
+        pos=0, las=1)
>   arrows(c(.6,t.5-.6), AR2,
+          c(0, t.5   ), AR2, length = 0.08, angle = 25)
>   text(   t.5/2,   AR2, quote(t[0.5]))
>   text(   t.5 +.4, AR2,
+        quote({f(t[0.5]) == frac(phi[1]+phi[2],2)}~{} %=>% {}~~
+                 {t[0.5] == frac(log(2), e^{phi[3]})}), adj = c(0, 0.5))
>   par(op)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSasympOff")
> ### * SSasympOff
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSasympOff
> ### Title: Self-Starting 'nls' Asymptotic Model with an Offset
> ### Aliases: SSasympOff
> ### Keywords: models
> 
> ### ** Examples
> 
> CO2.Qn1 <- CO2[CO2$Plant == "Qn1", ]
> SSasympOff(CO2.Qn1$conc, 32, -4, 43)  # response only
[1] 19.65412 29.14785 31.27791 31.88435 31.99259 31.99970 32.00000
> local({  Asym <- 32; lrc <- -4; c0 <- 43
+   SSasympOff(CO2.Qn1$conc, Asym, lrc, c0) # response and gradient
+ })
[1] 19.65412 29.14785 31.27791 31.88435 31.99259 31.99970 32.00000
attr(,"gradient")
          Asym          lrc            c0
[1,] 0.6141911 1.175838e+01 -2.261227e-01
[2,] 0.9108704 6.895531e+00 -5.223887e-02
[3,] 0.9774346 2.737698e+00 -1.322559e-02
[4,] 0.9963859 6.503026e-01 -2.118250e-03
[5,] 0.9997683 6.204920e-02 -1.357751e-04
[6,] 0.9999906 3.479529e-03 -5.505583e-06
[7,] 1.0000000 1.369435e-05 -1.430967e-08
> getInitial(uptake ~ SSasympOff(conc, Asym, lrc, c0), data = CO2.Qn1)
     Asym       lrc        c0 
38.139782 -4.380647 51.223238 
> ## Initial values are in fact the converged values
> fm1 <- nls(uptake ~ SSasympOff(conc, Asym, lrc, c0), data = CO2.Qn1)
> summary(fm1)

Formula: uptake ~ SSasympOff(conc, Asym, lrc, c0)

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
Asym  38.1398     0.9164  41.620 1.99e-06 ***
lrc   -4.3806     0.2042 -21.457 2.79e-05 ***
c0    51.2232    11.6698   4.389   0.0118 *  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 1.663 on 4 degrees of freedom

> 
> ## Visualize the SSasympOff()  model  parametrization :
> 
>   xx <- seq(0.25, 8,  by=1/16)
>   yy <- 5 * (1 -  exp(-(xx - 3/4)*0.4))
>   stopifnot( all.equal(yy, SSasympOff(xx, Asym = 5, lrc = log(0.4), c0 = 3/4)) )
>   require(graphics)
>   op <- par(mar = c(0, 0, 4.0, 0))
>   plot(xx, yy, type = "l", axes = FALSE, ylim = c(-.5,6), xlim = c(-1, 8),
+        xlab = "", ylab = "", lwd = 2,
+        main = "Parameters in the SSasympOff model")
>   mtext(quote(list(phi[1] == "Asym", phi[2] == "lrc", phi[3] == "c0")))
>   usr <- par("usr")
>   arrows(usr[1], 0, usr[2], 0, length = 0.1, angle = 25)
>   arrows(0, usr[3], 0, usr[4], length = 0.1, angle = 25)
>   text(usr[2] - 0.2, 0.1, "x", adj = c(1, 0))
>   text(     -0.1, usr[4], "y", adj = c(1, 1))
>   abline(h = 5, lty = 3)
>   arrows(-0.8, c(2.1, 2.9),
+          -0.8, c(0  , 5  ), length = 0.1, angle = 25)
>   text  (-0.8, 2.5, quote(phi[1]))
>   segments(3/4, -.2, 3/4, 1.6, lty = 2)
>   text    (3/4,    c(-.3, 1.7), quote(phi[3]))
>   arrows(c(1.1, 1.4), -.15,
+          c(3/4, 7/4), -.15, length = 0.07, angle = 25)
>   text    (3/4 + 1/2, -.15, quote(1))
>   segments(c(3/4, 7/4, 7/4), c(0, 0, 2),   # 5 * exp(log(0.4)) = 2
+            c(7/4, 7/4, 3/4), c(0, 2, 0),  lty = 2, lwd = 2)
>   text(      7/4 +.1, 2./2, quote(phi[1]*e^phi[2]), adj = c(0, .5))
>   par(op)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSasympOrig")
> ### * SSasympOrig
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSasympOrig
> ### Title: Self-Starting 'nls' Asymptotic Model through the Origin
> ### Aliases: SSasympOrig
> ### Keywords: models
> 
> ### ** Examples
> 
> ## Visualize the SSasympOrig()  model  parametrization :
> 
>   xx <- seq(0, 5, length.out = 101)
>   yy <- 5 * (1- exp(-xx * log(2)))
>   stopifnot( all.equal(yy, SSasympOrig(xx, Asym = 5, lrc = log(log(2)))) )
> 
>   require(graphics)
>   op <- par(mar = c(0, 0, 3.5, 0))
>   plot(xx, yy, type = "l", axes = FALSE, ylim = c(0,5), xlim = c(-1/4, 5),
+        xlab = "", ylab = "", lwd = 2,
+        main = quote("Parameters in the SSasympOrig model"~~ f[phi](x)))
>   mtext(quote(list(phi[1] == "Asym", phi[2] == "lrc")))
>   usr <- par("usr")
>   arrows(usr[1], 0, usr[2], 0, length = 0.1, angle = 25)
>   arrows(0, usr[3], 0, usr[4], length = 0.1, angle = 25)
>   text(usr[2] - 0.2, 0.1, "x", adj = c(1, 0))
>   text(   -0.1,   usr[4], "y", adj = c(1, 1))
>   abline(h = 5, lty = 3)
>   axis(2, at = 5*c(1/2,1), labels= expression(frac(phi[1],2), phi[1]), pos=0, las=1)
>   arrows(c(.3,.7), 5/2,
+          c(0, 1 ), 5/2, length = 0.08, angle = 25)
>   text(   0.5,     5/2, quote(t[0.5]))
>   text(   1 +.4,   5/2,
+        quote({f(t[0.5]) == frac(phi[1],2)}~{} %=>% {}~~{t[0.5] == frac(log(2), e^{phi[2]})}),
+        adj = c(0, 0.5))
>   par(op)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSbiexp")
> ### * SSbiexp
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSbiexp
> ### Title: Self-Starting 'nls' Biexponential Model
> ### Aliases: SSbiexp
> ### Keywords: models
> 
> ### ** Examples
> 
> Indo.1 <- Indometh[Indometh$Subject == 1, ]
> SSbiexp( Indo.1$time, 3, 1, 0.6, -1.3 )  # response only
 [1] 2.08098572 1.29421044 0.87967145 0.65483364 0.52711347 0.36094621
 [7] 0.26575722 0.20176113 0.15359129 0.11694936 0.06780767
> A1 <- 3; lrc1 <- 1; A2 <- 0.6; lrc2 <- -1.3
> SSbiexp( Indo.1$time, A1, lrc1, A2, lrc2 ) # response and gradient
 [1] 2.08098572 1.29421044 0.87967145 0.65483364 0.52711347 0.36094621
 [7] 0.26575722 0.20176113 0.15359129 0.11694936 0.06780767
attr(,"gradient")
                A1          lrc1        A2        lrc2
 [1,] 5.068347e-01 -1.033290e+00 0.9341363 -0.03818728
 [2,] 2.568814e-01 -1.047414e+00 0.8726106 -0.07134424
 [3,] 1.301964e-01 -7.962985e-01 0.8151372 -0.09996786
 [4,] 6.598804e-02 -5.381222e-01 0.7614492 -0.12451147
 [5,] 3.344502e-02 -3.409237e-01 0.7112973 -0.14538835
 [6,] 4.354421e-03 -7.101926e-02 0.5798049 -0.18961833
 [7,] 2.873397e-04 -7.029632e-03 0.4414920 -0.21657709
 [8,] 1.896098e-05 -6.184955e-04 0.3361737 -0.21988328
 [9,] 1.251198e-06 -5.101663e-05 0.2559792 -0.20928744
[10,] 8.256409e-08 -4.039784e-06 0.1949152 -0.19123411
[11,] 3.595188e-10 -2.345456e-08 0.1130128 -0.14783797
> print(getInitial(conc ~ SSbiexp(time, A1, lrc1, A2, lrc2), data = Indo.1),
+       digits = 5)
      A1     lrc1       A2     lrc2 
 2.02928  0.57939  0.19155 -1.78778 
> ## Initial values are in fact the converged values
> fm1 <- nls(conc ~ SSbiexp(time, A1, lrc1, A2, lrc2), data = Indo.1)
> summary(fm1)

Formula: conc ~ SSbiexp(time, A1, lrc1, A2, lrc2)

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
A1     2.0293     0.1099  18.464 3.39e-07 ***
lrc1   0.5794     0.1247   4.648  0.00235 ** 
A2     0.1915     0.1106   1.731  0.12698    
lrc2  -1.7878     0.7871  -2.271  0.05737 .  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.04103 on 7 degrees of freedom

> 
> ## Show the model components visually
>   require(graphics)
> 
>   xx <- seq(0, 5, length.out = 101)
>   y1 <- 3.5 * exp(-4*xx)
>   y2 <- 1.5 * exp(-xx)
>   plot(xx, y1 + y2, type = "l", lwd=2, ylim = c(-0.2,6), xlim = c(0, 5),
+        main = "Components of the SSbiexp model")
>   lines(xx, y1, lty = 2, col="tomato"); abline(v=0, h=0, col="gray40")
>   lines(xx, y2, lty = 3, col="blue2" )
>   legend("topright", c("y1+y2", "y1 = 3.5 * exp(-4*x)", "y2 = 1.5 * exp(-x)"),
+          lty=1:3, col=c("black","tomato","blue2"), bty="n")
>   axis(2, pos=0, at = c(3.5, 1.5), labels = c("A1","A2"), las=2)
> 
> ## and how you could have got their sum via SSbiexp():
>   ySS <- SSbiexp(xx, 3.5, log(4), 1.5, log(1))
>   ##                      ---          ---
>   stopifnot(all.equal(y1+y2, ySS, tolerance = 1e-15))
> 
> ## Show a no-noise example
> datN <- data.frame(time = (0:600)/64)
> datN$conc <- predict(fm1, newdata=datN)
> plot(conc ~ time, data=datN) # perfect, no noise
> ## IGNORE_RDIFF_BEGIN
> ## Fails by default (scaleOffset = 0) on most platforms, also after increasing maxiter !
> ## Not run: 
> ##D         nls(conc ~ SSbiexp(time, A1, lrc1, A2, lrc2), data = datN, trace=TRUE)
> ## End(Not run)
> ## Don't show: 
> try( # maxiter=10: store less garbage
+         nls(conc ~ SSbiexp(time, A1, lrc1, A2, lrc2), data = datN,
+             trace=TRUE, control = list(maxiter = 10)) )
0.01722077  (5.34e+02): par = (0.6168807 -1.783839 2.050204 0.2004597)
3.308944e-06 (1.13e+04): par = (0.5798674 -1.784335 2.028943 0.1920502)
2.571079e-11 (7.68e+06): par = (0.5793882 -1.78778 2.029276 0.1915479)
1.650535e-23 (2.91e+03): par = (0.5793887 -1.787785 2.029277 0.1915475)
3.486470e-28 (9.76e-01): par = (0.5793887 -1.787785 2.029277 0.1915475)
3.475413e-28 (1.03e+00): par = (0.5793887 -1.787785 2.029277 0.1915475)
3.425893e-28 (5.48e-01): par = (0.5793887 -1.787785 2.029277 0.1915475)
3.425893e-28 (5.48e-01): par = (0.5793887 -1.787785 2.029277 0.1915475)
3.425893e-28 (5.48e-01): par = (0.5793887 -1.787785 2.029277 0.1915475)
3.425893e-28 (5.48e-01): par = (0.5793887 -1.787785 2.029277 0.1915475)
3.425893e-28 (5.48e-01): par = (0.5793887 -1.787785 2.029277 0.1915475)
Error in nls(y ~ cbind(exp(-exp(lrc1) * x), exp(-exp(lrc2) * x)), data = xy,  : 
  number of iterations exceeded maximum of 10
> ## End(Don't show)
> fmX1 <- nls(conc ~ SSbiexp(time, A1, lrc1, A2, lrc2), data = datN,
+             control = list(scaleOffset=1))
> fmX  <- nls(conc ~ SSbiexp(time, A1, lrc1, A2, lrc2), data = datN,
+             control = list(scaleOffset=1, printEval=TRUE, tol=1e-11, nDcentral=TRUE), trace=TRUE)
0.01722077    (6.55e-02): par = (0.6168807 -1.783839 2.050204 0.2004597)
  It.   1, fac=           1, eval (no.,total): ( 1,  1): new dev = 3.30894e-06
3.308942e-06  (9.08e-04): par = (0.5798674 -1.784335 2.028943 0.1920502)
  It.   2, fac=           1, eval (no.,total): ( 1,  2): new dev = 2.57108e-11
2.571081e-11  (2.53e-06): par = (0.5793882 -1.78778 2.029276 0.1915479)
  It.   3, fac=           1, eval (no.,total): ( 1,  3): new dev = 1.66968e-23
1.669677e-23  (2.04e-12): par = (0.5793887 -1.787785 2.029277 0.1915475)
1.667503e-23  (1.67e-13): par = (2.029277 0.5793887 0.1915475 -1.787785)
> all.equal(coef(fm1), coef(fmX1), tolerance=0) # ... rel.diff.: 1.57e-6
[1] "Mean relative difference: 1.574121e-06"
> all.equal(coef(fm1), coef(fmX),  tolerance=0) # ... rel.diff.: 1.03e-12
[1] "Mean relative difference: 1.031016e-12"
> ## IGNORE_RDIFF_END
> stopifnot(all.equal(coef(fm1), coef(fmX1), tolerance = 6e-6),
+           all.equal(coef(fm1), coef(fmX ), tolerance = 1e-11))
> 
> 
> 
> cleanEx()
> nameEx("SSfol")
> ### * SSfol
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSfol
> ### Title: Self-Starting 'nls' First-order Compartment Model
> ### Aliases: SSfol
> ### Keywords: models
> 
> ### ** Examples
> 
> Theoph.1 <- Theoph[ Theoph$Subject == 1, ]
> with(Theoph.1, SSfol(Dose, Time, -2.5, 0.5, -3)) # response only
 [1] 0.000000 2.214486 3.930988 5.261945 5.659813 5.084852 4.587699 3.916808
 [9] 3.318395 2.579204 0.943593
> with(Theoph.1, local({  lKe <- -2.5; lKa <- 0.5; lCl <- -3
+   SSfol(Dose, Time, lKe, lKa, lCl) # response _and_ gradient
+ }))
 [1] 0.000000 2.214486 3.930988 5.261945 5.659813 5.084852 4.587699 3.916808
 [9] 3.318395 2.579204 0.943593
attr(,"gradient")
            lKe         lKa       lCl
 [1,]  0.000000  0.00000000  0.000000
 [2,]  2.190284  1.78781716 -2.214486
 [3,]  3.825518  2.35519507 -3.930988
 [4,]  4.952713  1.75648252 -5.261945
 [5,]  4.976520  0.53458070 -5.659813
 [6,]  3.752822 -0.18560297 -5.084852
 [7,]  2.906859 -0.22729852 -4.587699
 [8,]  1.861771 -0.20447579 -3.916808
 [9,]  1.027129 -0.17383515 -3.318395
[10,]  0.148370 -0.13513891 -2.579204
[11,] -0.894541 -0.04944021 -0.943593
> getInitial(conc ~ SSfol(Dose, Time, lKe, lKa, lCl), data = Theoph.1)
      lKe       lKa       lCl 
-2.994845  0.609169 -3.971003 
> ## Initial values are in fact the converged values
> fm1 <- nls(conc ~ SSfol(Dose, Time, lKe, lKa, lCl), data = Theoph.1)
> summary(fm1)

Formula: conc ~ SSfol(Dose, Time, lKe, lKa, lCl)

Parameters:
    Estimate Std. Error t value Pr(>|t|)    
lKe  -2.9196     0.1709 -17.085 1.40e-07 ***
lKa   0.5752     0.1728   3.328   0.0104 *  
lCl  -3.9159     0.1273 -30.768 1.35e-09 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.732 on 8 degrees of freedom

> 
> 
> 
> cleanEx()
> nameEx("SSfpl")
> ### * SSfpl
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSfpl
> ### Title: Self-Starting 'nls' Four-Parameter Logistic Model
> ### Aliases: SSfpl
> ### Keywords: models
> 
> ### ** Examples
> 
> Chick.1 <- ChickWeight[ChickWeight$Chick == 1, ]
> SSfpl(Chick.1$Time, 13, 368, 14, 6)  # response only
 [1]  44.38189  55.31704  69.39853  87.05603 108.47420 133.43149 161.18758
 [8] 190.50000 219.81242 247.56851 272.52580 283.70240
> local({
+   A <- 13; B <- 368; xmid <- 14; scal <- 6
+   SSfpl(Chick.1$Time, A, B, xmid, scal) # response _and_ gradient
+ })
 [1]  44.38189  55.31704  69.39853  87.05603 108.47420 133.43149 161.18758
 [8] 190.50000 219.81242 247.56851 272.52580 283.70240
attr(,"gradient")
              A          B       xmid       scal
 [1,] 0.9116003 0.08839968  -4.767956  11.125231
 [2,] 0.8807971 0.11920292  -6.212120  12.424241
 [3,] 0.8411309 0.15886910  -7.906425  13.177374
 [4,] 0.7913915 0.20860853  -9.767885  13.023846
 [5,] 0.7310586 0.26894142 -11.632873  11.632873
 [6,] 0.6607564 0.33924363 -13.262646   8.841764
 [7,] 0.5825702 0.41742979 -14.388278   4.796093
 [8,] 0.5000000 0.50000000 -14.791667   0.000000
 [9,] 0.4174298 0.58257021 -14.388278  -4.796093
[10,] 0.3392436 0.66075637 -13.262646  -8.841764
[11,] 0.2689414 0.73105858 -11.632873 -11.632873
[12,] 0.2374580 0.76254197 -10.713410 -12.498978
> print(getInitial(weight ~ SSfpl(Time, A, B, xmid, scal), data = Chick.1),
+       digits = 5)
       A        B     xmid     scal 
 27.4532 348.9712  19.3905   6.6726 
> ## Initial values are in fact the converged values
> fm1 <- nls(weight ~ SSfpl(Time, A, B, xmid, scal), data = Chick.1)
> summary(fm1)

Formula: weight ~ SSfpl(Time, A, B, xmid, scal)

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
A      27.453      6.601   4.159 0.003169 ** 
B     348.971     57.899   6.027 0.000314 ***
xmid   19.391      2.194   8.836 2.12e-05 ***
scal    6.673      1.002   6.662 0.000159 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 2.351 on 8 degrees of freedom

> 
> ## Visualizing the  SSfpl()  parametrization
>   xx <- seq(-0.5, 5, length.out = 101)
>   yy <- 1 + 4 / (1 + exp((2-xx))) # == SSfpl(xx, *) :
>   stopifnot( all.equal(yy, SSfpl(xx, A = 1, B = 5, xmid = 2, scal = 1)) )
>   require(graphics)
>   op <- par(mar = c(0, 0, 3.5, 0))
>   plot(xx, yy, type = "l", axes = FALSE, ylim = c(0,6), xlim = c(-1, 5),
+        xlab = "", ylab = "", lwd = 2,
+        main = "Parameters in the SSfpl model")
>   mtext(quote(list(phi[1] == "A", phi[2] == "B", phi[3] == "xmid", phi[4] == "scal")))
>   usr <- par("usr")
>   arrows(usr[1], 0, usr[2], 0, length = 0.1, angle = 25)
>   arrows(0, usr[3], 0, usr[4], length = 0.1, angle = 25)
>   text(usr[2] - 0.2, 0.1, "x", adj = c(1, 0))
>   text(     -0.1, usr[4], "y", adj = c(1, 1))
>   abline(h = c(1, 5), lty = 3)
>   arrows(-0.8, c(2.1, 2.9),
+          -0.8, c(0,   5  ), length = 0.1, angle = 25)
>   text  (-0.8, 2.5, quote(phi[1]))
>   arrows(-0.3, c(1/4, 3/4),
+          -0.3, c(0,   1  ), length = 0.07, angle = 25)
>   text  (-0.3, 0.5, quote(phi[2]))
>   text(2, -.1, quote(phi[3]))
>   segments(c(2,3,3), c(0,3,4), # SSfpl(x = xmid = 2) = 3
+            c(2,3,2), c(3,4,3),    lty = 2, lwd = 0.75)
>   arrows(c(2.3, 2.7), 3,
+          c(2.0, 3  ), 3, length = 0.08, angle = 25)
>   text(      2.5,     3, quote(phi[4])); text(3.1, 3.5, "1")
>   par(op)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSgompertz")
> ### * SSgompertz
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSgompertz
> ### Title: Self-Starting 'nls' Gompertz Growth Model
> ### Aliases: SSgompertz
> ### Keywords: models
> 
> ### ** Examples
> 
> DNase.1 <- subset(DNase, Run == 1)
> SSgompertz(log(DNase.1$conc), 4.5, 2.3, 0.7)  # response only
 [1] 0.00525729 0.00525729 0.07323255 0.07323255 0.18049064 0.18049064
 [7] 0.36508763 0.36508763 0.63288772 0.63288772 0.97257180 0.97257180
[13] 1.36033340 1.36033340 1.76786902 1.76786902
> local({  Asym <- 4.5; b2 <- 2.3; b3 <- 0.7
+   SSgompertz(log(DNase.1$conc), Asym, b2, b3) # response _and_ gradient
+ })
 [1] 0.00525729 0.00525729 0.07323255 0.07323255 0.18049064 0.18049064
 [7] 0.36508763 0.36508763 0.63288772 0.63288772 0.97257180 0.97257180
[13] 1.36033340 1.36033340 1.76786902 1.76786902
attr(,"gradient")
             Asym          b2         b3
 [1,] 0.001168287 -0.01543407  0.1531221
 [2,] 0.001168287 -0.01543407  0.1531221
 [3,] 0.016273900 -0.13112424  0.7036230
 [4,] 0.016273900 -0.13112424  0.7036230
 [5,] 0.040109031 -0.25238507  0.7795153
 [6,] 0.040109031 -0.25238507  0.7795153
 [7,] 0.081130585 -0.39869082  0.3233828
 [8,] 0.081130585 -0.39869082  0.3233828
 [9,] 0.140641716 -0.53975407 -0.7914802
[10,] 0.140641716 -0.53975407 -0.7914802
[11,] 0.216127067 -0.64777036 -2.4251586
[12,] 0.216127067 -0.64777036 -2.4251586
[13,] 0.302296311 -0.70757894 -4.2605728
[14,] 0.302296311 -0.70757894 -4.2605728
[15,] 0.392859783 -0.71814108 -5.9597255
[16,] 0.392859783 -0.71814108 -5.9597255
> print(getInitial(density ~ SSgompertz(log(conc), Asym, b2, b3),
+                  data = DNase.1), digits = 5)
   Asym      b2      b3 
4.60333 2.27134 0.71647 
> ## Initial values are in fact the converged values
> fm1 <- nls(density ~ SSgompertz(log(conc), Asym, b2, b3),
+            data = DNase.1)
> summary(fm1)

Formula: density ~ SSgompertz(log(conc), Asym, b2, b3)

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
Asym  4.60333    0.65321   7.047 8.71e-06 ***
b2    2.27134    0.14373  15.803 7.24e-10 ***
b3    0.71647    0.02206  32.475 7.85e-14 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.02684 on 13 degrees of freedom

> plot(density ~ log(conc), DNase.1, # xlim = c(0, 21),
+      main = "SSgompertz() fit to DNase.1")
> ux <- par("usr")[1:2]; x <- seq(ux[1], ux[2], length.out=250)
> lines(x, do.call(SSgompertz, c(list(x=x), coef(fm1))), col = "red", lwd=2)
> As <- coef(fm1)[["Asym"]]; abline(v = 0, h = 0, lty = 3)
> axis(2, at= exp(-coef(fm1)[["b2"]]), quote(e^{-b[2]}), las=1, pos=0)
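> ## Extra sketch: the Gompertz curve in closed form is Asym * exp(-b2 * b3^x) :
> x <- log(DNase.1$conc)
> stopifnot(all.equal(SSgompertz(x, 4.5, 2.3, 0.7), 4.5 * exp(-2.3 * 0.7^x)))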
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSlogis")
> ### * SSlogis
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSlogis
> ### Title: Self-Starting 'nls' Logistic Model
> ### Aliases: SSlogis
> ### Keywords: models
> 
> ### ** Examples
> 
> dwlg1 <- data.frame(Prop = c(rep(0,5), 2, 5, rep(9, 9)), end = 1:16)
> iPar <- getInitial(Prop ~ SSlogis(end, Asym, xmid, scal), data = dwlg1)
> ## failed in R <= 3.4.2 (because of the '0's in 'Prop')
> stopifnot(all.equal(tolerance = 1e-6,
+    iPar, c(Asym = 9.0678, xmid = 6.79331, scal = 0.499934)))
> 
> ## Visualize the SSlogis()  model  parametrization :
>   xx <- seq(-0.75, 5, by=1/32)
>   yy <- 5 / (1 + exp((2-xx)/0.6)) # == SSlogis(xx, *):
>   stopifnot( all.equal(yy, SSlogis(xx, Asym = 5, xmid = 2, scal = 0.6)) )
>   require(graphics)
>   op <- par(mar = c(0.5, 0, 3.5, 0))
>   plot(xx, yy, type = "l", axes = FALSE, ylim = c(0,6), xlim = c(-1, 5),
+        xlab = "", ylab = "", lwd = 2,
+        main = "Parameters in the SSlogis model")
>   mtext(quote(list(phi[1] == "Asym", phi[2] == "xmid", phi[3] == "scal")))
>   usr <- par("usr")
>   arrows(usr[1], 0, usr[2], 0, length = 0.1, angle = 25)
>   arrows(0, usr[3], 0, usr[4], length = 0.1, angle = 25)
>   text(usr[2] - 0.2, 0.1, "x", adj = c(1, 0))
>   text(     -0.1, usr[4], "y", adj = c(1, 1))
>   abline(h = 5, lty = 3)
>   arrows(-0.8, c(2.1, 2.9),
+          -0.8, c(0,   5  ), length = 0.1, angle = 25)
>   text  (-0.8, 2.5, quote(phi[1]))
>   segments(c(2,2.6,2.6), c(0,  2.5,3.5),   # NB.  SSlogis(x = xmid = 2) = 2.5
+            c(2,2.6,2  ), c(2.5,3.5,2.5), lty = 2, lwd = 0.75)
>   text(2, -.1, quote(phi[2]))
>   arrows(c(2.2, 2.4), 2.5,
+          c(2.0, 2.6), 2.5, length = 0.08, angle = 25)
>   text(      2.3,     2.5, quote(phi[3])); text(2.7, 3, "1")
>   par(op)
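> ## Extra sketch: SSlogis() is the four-parameter SSfpl() with the lower
> ## asymptote A fixed at 0 :
> stopifnot(all.equal(SSlogis(xx, 5, 2, 0.6), SSfpl(xx, 0, 5, 2, 0.6)))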
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSmicmen")
> ### * SSmicmen
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSmicmen
> ### Title: Self-Starting 'nls' Michaelis-Menten Model
> ### Aliases: SSmicmen
> ### Keywords: models
> 
> ### ** Examples
> 
> PurTrt <- Puromycin[ Puromycin$state == "treated", ]
> SSmicmen(PurTrt$conc, 200, 0.05)  # response only
 [1]  57.14286  57.14286 109.09091 109.09091 137.50000 137.50000 162.96296
 [8] 162.96296 183.60656 183.60656 191.30435 191.30435
> local({  Vm <- 200; K <- 0.05
+   SSmicmen(PurTrt$conc, Vm, K)    # response _and_ gradient
+ })
 [1]  57.14286  57.14286 109.09091 109.09091 137.50000 137.50000 162.96296
 [8] 162.96296 183.60656 183.60656 191.30435 191.30435
attr(,"gradient")
             Vm         K
 [1,] 0.2857143 -816.3265
 [2,] 0.2857143 -816.3265
 [3,] 0.5454545 -991.7355
 [4,] 0.5454545 -991.7355
 [5,] 0.6875000 -859.3750
 [6,] 0.6875000 -859.3750
 [7,] 0.8148148 -603.5665
 [8,] 0.8148148 -603.5665
 [9,] 0.9180328 -300.9944
[10,] 0.9180328 -300.9944
[11,] 0.9565217 -166.3516
[12,] 0.9565217 -166.3516
> print(getInitial(rate ~ SSmicmen(conc, Vm, K), data = PurTrt), digits = 3)
      Vm        K 
212.6837   0.0641 
> ## Initial values are in fact the converged values
> fm1 <- nls(rate ~ SSmicmen(conc, Vm, K), data = PurTrt)
> summary(fm1)

Formula: rate ~ SSmicmen(conc, Vm, K)

Parameters:
    Estimate Std. Error t value Pr(>|t|)    
Vm 2.127e+02  6.947e+00  30.615 3.24e-11 ***
K  6.412e-02  8.281e-03   7.743 1.57e-05 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 10.93 on 10 degrees of freedom

> ## Alternative call using the subset argument
> fm2 <- nls(rate ~ SSmicmen(conc, Vm, K), data = Puromycin,
+            subset = state == "treated")
> summary(fm2) # The same indeed:

Formula: rate ~ SSmicmen(conc, Vm, K)

Parameters:
    Estimate Std. Error t value Pr(>|t|)    
Vm 2.127e+02  6.947e+00  30.615 3.24e-11 ***
K  6.412e-02  8.281e-03   7.743 1.57e-05 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 10.93 on 10 degrees of freedom

> stopifnot(all.equal(coef(summary(fm1)), coef(summary(fm2))))
> 
> ## Visualize the SSmicmen()  Michaelis-Menten model parametrization :
> 
>   xx <- seq(0, 5, length.out = 101)
>   yy <- 5 * xx/(1+xx)
>   stopifnot(all.equal(yy, SSmicmen(xx, Vm = 5, K = 1)))
>   require(graphics)
>   op <- par(mar = c(0, 0, 3.5, 0))
>   plot(xx, yy, type = "l", lwd = 2, ylim = c(-1/4,6), xlim = c(-1, 5),
+        ann = FALSE, axes = FALSE, main = "Parameters in the SSmicmen model")
>   mtext(quote(list(phi[1] == "Vm", phi[2] == "K")))
>   usr <- par("usr")
>   arrows(usr[1], 0, usr[2], 0, length = 0.1, angle = 25)
>   arrows(0, usr[3], 0, usr[4], length = 0.1, angle = 25)
>   text(usr[2] - 0.2, 0.1, "x", adj = c(1, 0))
>   text(     -0.1, usr[4], "y", adj = c(1, 1))
>   abline(h = 5, lty = 3)
>   arrows(-0.8, c(2.1, 2.9),
+          -0.8, c(0,   5  ),  length = 0.1, angle = 25)
>   text(  -0.8,     2.5, quote(phi[1]))
>   segments(1, 0, 1, 2.7, lty = 2, lwd = 0.75)
>   text(1, 2.7, quote(phi[2]))
>   par(op)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SSweibull")
> ### * SSweibull
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SSweibull
> ### Title: Self-Starting 'nls' Weibull Growth Curve Model
> ### Aliases: SSweibull
> ### Keywords: models
> 
> ### ** Examples
> 
> Chick.6 <- subset(ChickWeight, (Chick == 6) & (Time > 0))
> SSweibull(Chick.6$Time, 160, 115, -5.5, 2.5)   # response only
 [1]  47.62811  59.09743  79.79756 105.12008 128.41818 145.02585 154.25783
 [8] 158.24919 159.58222 159.92314 159.97023
> local({ Asym <- 160; Drop <- 115; lrc <- -5.5; pwr <- 2.5
+   SSweibull(Chick.6$Time, Asym, Drop, lrc, pwr) # response _and_ gradient
+ })
 [1]  47.62811  59.09743  79.79756 105.12008 128.41818 145.02585 154.25783
 [8] 158.24919 159.58222 159.92314 159.97023
attr(,"gradient")
      Asym          Drop        lrc        pwr
 [1,]    1 -0.9771469094  2.5978438  1.8006881
 [2,]    1 -0.8774136912 13.1957043 18.2931305
 [3,]    1 -0.6974125358 28.9032091 51.7875987
 [4,]    1 -0.4772166721 40.5993205 84.4239136
 [5,]    1 -0.2746244909 40.8147795 93.9795029
 [6,]    1 -0.1302099955 30.5264027 75.8552610
 [7,]    1 -0.0499319343 17.2098335 45.4177374
 [8,]    1 -0.0152244293  7.3268815 20.3144290
 [9,]    1 -0.0036328431  2.3469622  6.7835933
[10,]    1 -0.0006683898  0.5619310  1.6833949
[11,]    1 -0.0002589123  0.2459116  0.7486834
> 
> ## IGNORE_RDIFF_BEGIN
> getInitial(weight ~ SSweibull(Time, Asym, Drop, lrc, pwr), data = Chick.6)
      Asym       Drop        lrc        pwr 
158.501204 110.997081  -5.993421   2.646141 
> ## IGNORE_RDIFF_END
> ## Initial values are in fact the converged values
> fm1 <- nls(weight ~ SSweibull(Time, Asym, Drop, lrc, pwr), data = Chick.6)
> summary(fm1)

Formula: weight ~ SSweibull(Time, Asym, Drop, lrc, pwr)

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
Asym 158.5012     1.1769  134.67 3.28e-13 ***
Drop 110.9971     2.6330   42.16 1.10e-09 ***
lrc   -5.9934     0.3733  -16.05 8.83e-07 ***
pwr    2.6461     0.1613   16.41 7.62e-07 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 2.061 on 7 degrees of freedom

> 
> ## Data and Fit:
> plot(weight ~ Time, Chick.6, xlim = c(0, 21), main = "SSweibull() fit to Chick.6")
> ux <- par("usr")[1:2]; x <- seq(ux[1], ux[2], length.out=250)
> lines(x, do.call(SSweibull, c(list(x=x), coef(fm1))), col = "red", lwd=2)
> As <- coef(fm1)[["Asym"]]; abline(v = 0, h = c(As, As - coef(fm1)[["Drop"]]), lty = 3)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("SignRank")
> ### * SignRank
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: SignRank
> ### Title: Distribution of the Wilcoxon Signed Rank Statistic
> ### Aliases: SignRank dsignrank psignrank qsignrank rsignrank
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
> par(mfrow = c(2,2))
> for(n in c(4:5,10,40)) {
+   x <- seq(0, n*(n+1)/2, length.out = 501)
+   plot(x, dsignrank(x, n = n), type = "l",
+        main = paste0("dsignrank(x, n = ", n, ")"))
+ }
> ## Don't show: 
> p <- c(1, 1, 1, 2, 2:6, 8, 10, 11, 13, 15, 17, 20, 22, 24,
+        27, 29, 31, 33, 35, 36, 38, 39, 39, 40)
> stopifnot(round(dsignrank(0:56, n = 10)* 2^10) == c(p, rev(p), 0),
+           qsignrank((1:16)/ 16, n = 4) == c(0:2, rep(3:7, each = 2), 8:10))
> ## End(Don't show)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("StructTS")
> ### * StructTS
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: StructTS
> ### Title: Fit Structural Time Series
> ### Aliases: StructTS print.StructTS predict.StructTS
> ### Keywords: ts
> 
> ### ** Examples
> 
> ## see also JohnsonJohnson, Nile and AirPassengers
> require(graphics)
> 
> trees <- window(treering, start = 0)
> (fit <- StructTS(trees, type = "level"))

Call:
StructTS(x = trees, type = "level")

Variances:
  level  epsilon  
0.00037  0.07199  
> plot(trees)
> lines(fitted(fit), col = "green")
> tsdiag(fit)
> 
> (fit <- StructTS(log10(UKgas), type = "BSM"))

Call:
StructTS(x = log10(UKgas), type = "BSM")

Variances:
    level      slope       seas    epsilon  
0.000e+00  1.733e-05  7.137e-04  3.678e-04  
> par(mfrow = c(4, 1)) # to give appropriate aspect ratio for next plot.
> plot(log10(UKgas))
> plot(cbind(fitted(fit), resids=resid(fit)), main = "UK gas consumption")
> 
> ## keep some parameters fixed; trace optimizer:
> StructTS(log10(UKgas), type = "BSM", fixed = c(0.1,0.001,NA,NA),
+          optim.control = list(trace = TRUE))
iter   10 value -0.936176
final  value -0.936176 
converged

Call:
StructTS(x = log10(UKgas), type = "BSM", fixed = c(0.1, 0.001, NA, NA), optim.control = list(trace = TRUE))

Variances:
    level      slope       seas    epsilon  
0.1000000  0.0010000  0.0003012  0.0000000  
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("TDist")
> ### * TDist
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: TDist
> ### Title: The Student t Distribution
> ### Aliases: TDist dt pt qt rt
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
> 1 - pt(1:5, df = 1)
[1] 0.25000000 0.14758362 0.10241638 0.07797913 0.06283296
> qt(.975, df = c(1:10,20,50,100,1000))
 [1] 12.706205  4.302653  3.182446  2.776445  2.570582  2.446912  2.364624
 [8]  2.306004  2.262157  2.228139  2.085963  2.008559  1.983972  1.962339
> 
> tt <- seq(0, 10, length.out = 21)
> ncp <- seq(0, 6, length.out = 31)
> ptn <- outer(tt, ncp, function(t, d) pt(t, df = 3, ncp = d))
> t.tit <- "Non-central t - Probabilities"
> image(tt, ncp, ptn, zlim = c(0,1), main = t.tit)
> persp(tt, ncp, ptn, zlim = 0:1, r = 2, phi = 20, theta = 200, main = t.tit,
+       xlab = "t", ylab = "non-centrality parameter",
+       zlab = "Pr(T <= t)")
> 
> plot(function(x) dt(x, df = 3, ncp = 2), -3, 11, ylim = c(0, 0.32),
+      main = "Non-central t - Density", yaxs = "i")
> 
> ## Relation between F_t(.) = pt(x, n) and pbeta():
> ptBet <- function(t, n) {
+     x <- n/(n + t^2)
+     r <- pb <- pbeta(x, n/2, 1/2) / 2
+     pos <- t > 0
+     r[pos] <- 1 - pb[pos]
+     r
+ }
> x <- seq(-5, 5, by = 1/8)
> nu <- 3:10
> pt. <- outer(x, nu, pt)
> ptB <- outer(x, nu, ptBet)
> ## matplot(x, pt., type = "l")
> stopifnot(all.equal(pt., ptB, tolerance = 1e-15))
> 
> 
> 
> cleanEx()
> nameEx("Tukey")
> ### * Tukey
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Tukey
> ### Title: The Studentized Range Distribution
> ### Aliases: Tukey ptukey qtukey
> ### Keywords: distribution
> 
> ### ** Examples
> 
> if(interactive())
+   curve(ptukey(x, nm = 6, df = 5), from = -1, to = 8, n = 101)
> (ptt <- ptukey(0:10, 2, df =  5))
 [1] 0.0000000 0.4889159 0.7835628 0.9126407 0.9632574 0.9833586 0.9918510
 [8] 0.9957141 0.9976011 0.9985838 0.9991249
> (qtt <- qtukey(.95, 2, df =  2:11))
 [1] 6.079637 4.500659 3.926503 3.635351 3.460456 3.344084 3.261182 3.199173
 [9] 3.151064 3.112663
> ## The precision may be no more than about 8 digits:
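> ## e.g., a rough round-trip check (not run); the deviation of
> ## ptukey(qtukey(p, ...), ...) from p indicates the attainable accuracy:
> ## Not run: max(abs(ptukey(qtt, 2, df = 2:11) - 0.95))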
> 
> 
> 
> cleanEx()
> nameEx("TukeyHSD")
> ### * TukeyHSD
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: TukeyHSD
> ### Title: Compute Tukey Honest Significant Differences
> ### Aliases: TukeyHSD
> ### Keywords: models design
> 
> ### ** Examples
> 
> require(graphics)
> 
> summary(fm1 <- aov(breaks ~ wool + tension, data = warpbreaks))
            Df Sum Sq Mean Sq F value  Pr(>F)   
wool         1    451   450.7   3.339 0.07361 . 
tension      2   2034  1017.1   7.537 0.00138 **
Residuals   50   6748   135.0                   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> TukeyHSD(fm1, "tension", ordered = TRUE)
  Tukey multiple comparisons of means
    95% family-wise confidence level
    factor levels have been ordered

Fit: aov(formula = breaks ~ wool + tension, data = warpbreaks)

$tension
         diff        lwr      upr     p adj
M-H  4.722222 -4.6311985 14.07564 0.4474210
L-H 14.722222  5.3688015 24.07564 0.0011218
L-M 10.000000  0.6465793 19.35342 0.0336262

> plot(TukeyHSD(fm1, "tension"))
> 
> 
> 
> cleanEx()
> nameEx("Uniform")
> ### * Uniform
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Uniform
> ### Title: The Uniform Distribution
> ### Aliases: Uniform dunif punif qunif runif
> ### Keywords: distribution
> 
> ### ** Examples
> 
> u <- runif(20)
> 
> ## The following relations always hold:
> punif(u) == u
 [1] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE
[16] TRUE TRUE TRUE TRUE TRUE
> dunif(u) == 1
 [1] TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE TRUE
[16] TRUE TRUE TRUE TRUE TRUE
> 
> var(runif(10000))  # ~= 1/12 = 0.08333
[1] 0.08475621
> 
> 
> 
> cleanEx()
> nameEx("Weibull")
> ### * Weibull
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Weibull
> ### Title: The Weibull Distribution
> ### Aliases: Weibull dweibull pweibull qweibull rweibull
> ### Keywords: distribution
> 
> ### ** Examples
> 
> x <- c(0, rlnorm(50))
> all.equal(dweibull(x, shape = 1), dexp(x))
[1] TRUE
> all.equal(pweibull(x, shape = 1, scale = pi), pexp(x, rate = 1/pi))
[1] TRUE
> ## Cumulative hazard H():
> all.equal(pweibull(x, 2.5, pi, lower.tail = FALSE, log.p = TRUE),
+           -(x/pi)^2.5, tolerance = 1e-15)
[1] TRUE
> all.equal(qweibull(x/11, shape = 1, scale = pi), qexp(x/11, rate = 1/pi))
[1] TRUE
> 
> 
> 
> cleanEx()
> nameEx("Wilcoxon")
> ### * Wilcoxon
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Wilcoxon
> ### Title: Distribution of the Wilcoxon Rank Sum Statistic
> ### Aliases: Wilcoxon dwilcox pwilcox qwilcox rwilcox
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
> x <- -1:(4*6 + 1)
> fx <- dwilcox(x, 4, 6)
> Fx <- pwilcox(x, 4, 6)
> 
> layout(rbind(1,2), widths = 1, heights = c(3,2))
> plot(x, fx, type = "h", col = "violet",
+      main =  "Probabilities (density) of Wilcoxon-Statist.(n=6, m=4)")
> plot(x, Fx, type = "s", col = "blue",
+      main =  "Distribution of Wilcoxon-Statist.(n=6, m=4)")
> abline(h = 0:1, col = "gray20", lty = 2)
> layout(1) # set back
> 
> N <- 200
> hist(U <- rwilcox(N, m = 4,n = 6), breaks = 0:25 - 1/2,
+      border = "red", col = "pink", sub = paste("N =",N))
> mtext("N * f(x),  f() = true \"density\"", side = 3, col = "blue")
>  lines(x, N*fx, type = "h", col = "blue", lwd = 2)
> points(x, N*fx, cex = 2)
> 
> ## Better is a Quantile-Quantile Plot
> qqplot(U, qw <- qwilcox((1:N - 1/2)/N, m = 4, n = 6),
+        main = paste("Q-Q-Plot of empirical and theoretical quantiles",
+                      "Wilcoxon Statistic,  (m=4, n=6)", sep = "\n"))
> n <- as.numeric(names(print(tU <- table(U))))
U
 0  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 
 1  2  4  5  5  5  7 12 13 14 12 23 16  9 13 16 13 11 14  1  1  2  1 
> text(n+.2, n+.5, labels = tU, col = "red")
> 
> 
> 
> cleanEx()
> nameEx("acf")
> ### * acf
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: acf
> ### Title: Auto- and Cross- Covariance and -Correlation Function Estimation
> ### Aliases: acf ccf pacf pacf.default [.acf
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## Examples from Venables & Ripley
> acf(lh)
> acf(lh, type = "covariance")
> pacf(lh)
> 
> acf(ldeaths)
> acf(ldeaths, ci.type = "ma")
> acf(ts.union(mdeaths, fdeaths))
> ccf(mdeaths, fdeaths, ylab = "cross-correlation")
> # (just the cross-correlations)
> 
> presidents # contains missing values
     Qtr1 Qtr2 Qtr3 Qtr4
1945   NA   87   82   75
1946   63   50   43   32
1947   35   60   54   55
1948   36   39   NA   NA
1949   69   57   57   51
1950   45   37   46   39
1951   36   24   32   23
1952   25   32   NA   32
1953   59   74   75   60
1954   71   61   71   57
1955   71   68   79   73
1956   76   71   67   75
1957   79   62   63   57
1958   60   49   48   52
1959   57   62   61   66
1960   71   62   61   57
1961   72   83   71   78
1962   79   71   62   74
1963   76   64   62   57
1964   80   73   69   69
1965   71   64   69   62
1966   63   46   56   44
1967   44   52   38   46
1968   36   49   35   44
1969   59   65   65   56
1970   66   53   61   52
1971   51   48   54   49
1972   49   61   NA   NA
1973   68   44   40   27
1974   28   25   24   24
> acf(presidents, na.action = na.pass)
> pacf(presidents, na.action = na.pass)
> 
> 
> 
> cleanEx()
> nameEx("acf2AR")
> ### * acf2AR
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: acf2AR
> ### Title: Compute an AR Process Exactly Fitting an ACF
> ### Aliases: acf2AR
> ### Keywords: ts
> 
> ### ** Examples
> 
> (Acf <- ARMAacf(c(0.6, 0.3, -0.2)))
        0         1         2         3 
1.0000000 0.6923077 0.5769231 0.3538462 
> acf2AR(Acf)
              1      2    3
ar(1) 0.6923077 0.0000  0.0
ar(2) 0.5625000 0.1875  0.0
ar(3) 0.6000000 0.3000 -0.2
> 
> 
> 
> cleanEx()
> nameEx("add1")
> ### * add1
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: add1
> ### Title: Add or Drop All Possible Single Terms to a Model
> ### Aliases: add1 add1.default add1.lm add1.glm drop1 drop1.default
> ###   drop1.lm drop1.glm
> ### Keywords: models
> 
> ### ** Examples
> 
> ## Don't show: 
> od <- options(digits = 5)
> ## End(Don't show)
> require(graphics); require(utils)
> ## following example(swiss)
> lm1 <- lm(Fertility ~ ., data = swiss)
> add1(lm1, ~ I(Education^2) + .^2)
Single term additions

Model:
Fertility ~ Agriculture + Examination + Education + Catholic + 
    Infant.Mortality
                             Df Sum of Sq  RSS AIC
<none>                                    2105 191
I(Education^2)                1      11.8 2093 192
Agriculture:Examination       1      10.7 2094 192
Agriculture:Education         1       1.8 2103 193
Agriculture:Catholic          1      75.0 2030 191
Agriculture:Infant.Mortality  1       4.4 2101 193
Examination:Education         1      48.7 2056 192
Examination:Catholic          1      40.8 2064 192
Examination:Infant.Mortality  1      65.9 2039 191
Education:Catholic            1     278.2 1827 186
Education:Infant.Mortality    1      93.0 2012 191
Catholic:Infant.Mortality     1       2.4 2103 193
> drop1(lm1, test = "F")  # So called 'type II' anova
Single term deletions

Model:
Fertility ~ Agriculture + Examination + Education + Catholic + 
    Infant.Mortality
                 Df Sum of Sq  RSS AIC F value  Pr(>F)    
<none>                        2105 191                    
Agriculture       1       308 2413 195    5.99  0.0187 *  
Examination       1        53 2158 190    1.03  0.3155    
Education         1      1163 3268 209   22.64 2.4e-05 ***
Catholic          1       448 2553 198    8.72  0.0052 ** 
Infant.Mortality  1       409 2514 197    7.96  0.0073 ** 
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> ## following example(glm)
> ## Don't show: 
> example(glm, echo = FALSE)
> ## End(Don't show)
> drop1(glm.D93, test = "Chisq")
Single term deletions

Model:
counts ~ outcome + treatment
          Df Deviance  AIC  LRT Pr(>Chi)  
<none>           5.13 56.8                
outcome    2    10.58 58.2 5.45    0.065 .
treatment  2     5.13 52.8 0.00    1.000  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> drop1(glm.D93, test = "F")
Warning in drop1.glm(glm.D93, test = "F") :
  F test assumes 'quasipoisson' family
Single term deletions

Model:
counts ~ outcome + treatment
          Df Deviance  AIC F value Pr(>F)
<none>           5.13 56.8               
outcome    2    10.58 58.2    2.13   0.23
treatment  2     5.13 52.8    0.00   1.00
> add1(glm.D93, scope = ~outcome*treatment, test = "Rao") ## Pearson Chi-square
Single term additions

Model:
counts ~ outcome + treatment
                  Df Deviance  AIC Rao score Pr(>Chi)
<none>                   5.13 56.8                   
outcome:treatment  4     0.00 59.6      5.17     0.27
> ## Don't show: 
> options(od)
> ## End(Don't show)
> 
> 
> 
> cleanEx()
> nameEx("addmargins")
> ### * addmargins
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: addmargins
> ### Title: Puts Arbitrary Margins on Multidimensional Tables or Arrays
> ### Aliases: addmargins
> ### Keywords: manip array
> 
> ### ** Examples
> 
> Aye <- sample(c("Yes", "Si", "Oui"), 177, replace = TRUE)
> Bee <- sample(c("Hum", "Buzz"), 177, replace = TRUE)
> Sea <- sample(c("White", "Black", "Red", "Dead"), 177, replace = TRUE)
> (A <- table(Aye, Bee, Sea))
, , Sea = Black

     Bee
Aye   Buzz Hum
  Oui    7   4
  Si     5   7
  Yes    7   7

, , Sea = Dead

     Bee
Aye   Buzz Hum
  Oui    8   7
  Si    10   7
  Yes   10   3

, , Sea = Red

     Bee
Aye   Buzz Hum
  Oui    2  12
  Si     4   7
  Yes    8   7

, , Sea = White

     Bee
Aye   Buzz Hum
  Oui    7  12
  Si    13  10
  Yes    6   7

> (aA <- addmargins(A))
, , Sea = Black

     Bee
Aye   Buzz Hum Sum
  Oui    7   4  11
  Si     5   7  12
  Yes    7   7  14
  Sum   19  18  37

, , Sea = Dead

     Bee
Aye   Buzz Hum Sum
  Oui    8   7  15
  Si    10   7  17
  Yes   10   3  13
  Sum   28  17  45

, , Sea = Red

     Bee
Aye   Buzz Hum Sum
  Oui    2  12  14
  Si     4   7  11
  Yes    8   7  15
  Sum   14  26  40

, , Sea = White

     Bee
Aye   Buzz Hum Sum
  Oui    7  12  19
  Si    13  10  23
  Yes    6   7  13
  Sum   26  29  55

, , Sea = Sum

     Bee
Aye   Buzz Hum Sum
  Oui   24  35  59
  Si    32  31  63
  Yes   31  24  55
  Sum   87  90 177

> ## Don't show: 
> stopifnot(is.table(aA))
> ## End(Don't show)
> ftable(A)
         Sea Black Dead Red White
Aye Bee                          
Oui Buzz         7    8   2     7
    Hum          4    7  12    12
Si  Buzz         5   10   4    13
    Hum          7    7   7    10
Yes Buzz         7   10   8     6
    Hum          7    3   7     7
> ftable(aA)
         Sea Black Dead Red White Sum
Aye Bee                              
Oui Buzz         7    8   2     7  24
    Hum          4    7  12    12  35
    Sum         11   15  14    19  59
Si  Buzz         5   10   4    13  32
    Hum          7    7   7    10  31
    Sum         12   17  11    23  63
Yes Buzz         7   10   8     6  31
    Hum          7    3   7     7  24
    Sum         14   13  15    13  55
Sum Buzz        19   28  14    26  87
    Hum         18   17  26    29  90
    Sum         37   45  40    55 177
> 
> # Non-commutative functions - note differences between resulting tables:
> ftable( addmargins(A, c(3, 1),
+                    FUN = list(list(Min = min, Max = max),
+                               Sum = sum)))
Margins computed over dimensions
in the following order:
1: Sea
2: Aye
         Sea Black Dead Red White Min Max
Aye Bee                                  
Oui Buzz         7    8   2     7   2   8
    Hum          4    7  12    12   4  12
Si  Buzz         5   10   4    13   4  13
    Hum          7    7   7    10   7  10
Yes Buzz         7   10   8     6   6  10
    Hum          7    3   7     7   3   7
Sum Buzz        19   28  14    26  12  31
    Hum         18   17  26    29  14  29
> ftable( addmargins(A, c(1, 3),
+                    FUN = list(Sum = sum,
+                               list(Min = min, Max = max))))
Margins computed over dimensions
in the following order:
1: Aye
2: Sea
         Sea Black Dead Red White Min Max
Aye Bee                                  
Oui Buzz         7    8   2     7   2   8
    Hum          4    7  12    12   4  12
Si  Buzz         5   10   4    13   4  13
    Hum          7    7   7    10   7  10
Yes Buzz         7   10   8     6   6  10
    Hum          7    3   7     7   3   7
Sum Buzz        19   28  14    26  14  28
    Hum         18   17  26    29  17  29
> 
> # Weird function needed to return the N when computing percentages:
> # the sweep below divides by sum(x)/100, so sum(x)^2/100 comes back as sum(x)
> sqsm <- function(x) sum(x)^2/100
> B <- table(Sea, Bee)
> round(sweep(addmargins(B, 1, list(list(All = sum, N = sqsm))), 2,
+             apply(B, 2, sum)/100, `/`), 1)
       Bee
Sea      Buzz   Hum
  Black  21.8  20.0
  Dead   32.2  18.9
  Red    16.1  28.9
  White  29.9  32.2
  All   100.0 100.0
  N      87.0  90.0
> round(sweep(addmargins(B, 2, list(list(All = sum, N = sqsm))), 1,
+             apply(B, 1, sum)/100, `/`), 1)
       Bee
Sea      Buzz   Hum   All     N
  Black  51.4  48.6 100.0  37.0
  Dead   62.2  37.8 100.0  45.0
  Red    35.0  65.0 100.0  40.0
  White  47.3  52.7 100.0  55.0
> 
> # A total over Bee requires formation of the Bee-margin first:
> mB <-  addmargins(B, 2, FUN = list(list(Total = sum)))
> round(ftable(sweep(addmargins(mB, 1, list(list(All = sum, N = sqsm))), 2,
+                    apply(mB, 2, sum)/100, `/`)), 1)
      Bee  Buzz   Hum Total
Sea                        
Black      21.8  20.0  20.9
Dead       32.2  18.9  25.4
Red        16.1  28.9  22.6
White      29.9  32.2  31.1
All       100.0 100.0 100.0
N          87.0  90.0 177.0
> 
> ## Zero-printing of a table with margins:
> set.seed(1)
> x <- sample( 1:7, 20, replace = TRUE)
> y <- sample( 1:7, 20, replace = TRUE)
> tx <- addmargins( table(x, y) )
> print(tx, zero.print = ".")
     y
x      1  2  3  4  5  6  7 Sum
  1    2  .  .  .  1  .  .   3
  2    2  1  .  .  .  1  .   4
  3    .  1  .  .  1  1  .   3
  4    .  .  .  .  .  .  1   1
  5    1  .  .  1  .  .  1   3
  6    .  .  1  1  1  .  .   3
  7    .  1  .  .  1  1  .   3
  Sum  5  3  1  2  4  3  2  20
> 
> 
> 
> cleanEx()
> nameEx("aggregate")
> ### * aggregate
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: aggregate
> ### Title: Compute Summary Statistics of Data Subsets
> ### Aliases: aggregate aggregate.default aggregate.data.frame
> ###   aggregate.formula aggregate.ts
> ### Keywords: category array
> 
> ### ** Examples
> 
> ## Compute the averages for the variables in 'state.x77', grouped
> ## according to the region (Northeast, South, North Central, West) that
> ## each state belongs to.
> aggregate(state.x77, list(Region = state.region), mean)
         Region Population   Income Illiteracy Life Exp    Murder  HS Grad
1     Northeast   5495.111 4570.222   1.000000 71.26444  4.722222 53.96667
2         South   4208.125 4011.938   1.737500 69.70625 10.581250 44.34375
3 North Central   4803.000 4611.083   0.700000 71.76667  5.275000 54.51667
4          West   2915.308 4702.615   1.023077 71.23462  7.215385 62.00000
     Frost      Area
1 132.7778  18141.00
2  64.6250  54605.12
3 138.8333  62652.00
4 102.1538 134463.00
> 
> ## Compute the averages according to region and the occurrence of more
> ## than 130 days of frost.
> aggregate(state.x77,
+           list(Region = state.region,
+                Cold = state.x77[,"Frost"] > 130),
+           mean)
         Region  Cold Population   Income Illiteracy Life Exp    Murder
1     Northeast FALSE  8802.8000 4780.400  1.1800000 71.12800  5.580000
2         South FALSE  4208.1250 4011.938  1.7375000 69.70625 10.581250
3 North Central FALSE  7233.8333 4633.333  0.7833333 70.95667  8.283333
4          West FALSE  4582.5714 4550.143  1.2571429 71.70000  6.828571
5     Northeast  TRUE  1360.5000 4307.500  0.7750000 71.43500  3.650000
6 North Central  TRUE  2372.1667 4588.833  0.6166667 72.57667  2.266667
7          West  TRUE   970.1667 4880.500  0.7500000 70.69167  7.666667
   HS Grad    Frost      Area
1 52.06000 110.6000  21838.60
2 44.34375  64.6250  54605.12
3 53.36667 120.0000  56736.50
4 60.11429  51.0000  91863.71
5 56.35000 160.5000  13519.00
6 55.66667 157.6667  68567.50
7 64.20000 161.8333 184162.17
> ## (Note that no state in 'South' is THAT cold.)
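> ## A quick cross-check of that remark (not run): tabulate the regions
> ## against the same "Cold" criterion used above.
> ## Not run: table(state.region, Cold = state.x77[, "Frost"] > 130)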
> 
> 
> ## example with character variables and NAs
> testDF <- data.frame(v1 = c(1,3,5,7,8,3,5,NA,4,5,7,9),
+                      v2 = c(11,33,55,77,88,33,55,NA,44,55,77,99) )
> by1 <- c("red", "blue", 1, 2, NA, "big", 1, 2, "red", 1, NA, 12)
> by2 <- c("wet", "dry", 99, 95, NA, "damp", 95, 99, "red", 99, NA, NA)
> aggregate(x = testDF, by = list(by1, by2), FUN = "mean")
  Group.1 Group.2 v1 v2
1       1      95  5 55
2       2      95  7 77
3       1      99  5 55
4       2      99 NA NA
5     big    damp  3 33
6    blue     dry  3 33
7     red     red  4 44
8     red     wet  1 11
> 
> # and if you want to treat NAs as a group
> fby1 <- factor(by1, exclude = "")
> fby2 <- factor(by2, exclude = "")
> aggregate(x = testDF, by = list(fby1, fby2), FUN = "mean")
   Group.1 Group.2  v1   v2
1        1      95 5.0 55.0
2        2      95 7.0 77.0
3        1      99 5.0 55.0
4        2      99  NA   NA
5      big    damp 3.0 33.0
6     blue     dry 3.0 33.0
7      red     red 4.0 44.0
8      red     wet 1.0 11.0
9       12    <NA> 9.0 99.0
10    <NA>    <NA> 7.5 82.5
> 
> 
> ## Formulas, one ~ one, one ~ many, many ~ one, and many ~ many:
> aggregate(weight ~ feed, data = chickwts, mean)
       feed   weight
1    casein 323.5833
2 horsebean 160.2000
3   linseed 218.7500
4  meatmeal 276.9091
5   soybean 246.4286
6 sunflower 328.9167
> aggregate(breaks ~ wool + tension, data = warpbreaks, mean)
  wool tension   breaks
1    A       L 44.55556
2    B       L 28.22222
3    A       M 24.00000
4    B       M 28.77778
5    A       H 24.55556
6    B       H 18.77778
> aggregate(cbind(Ozone, Temp) ~ Month, data = airquality, mean)
  Month    Ozone     Temp
1     5 23.61538 66.73077
2     6 29.44444 78.22222
3     7 59.11538 83.88462
4     8 59.96154 83.96154
5     9 31.44828 76.89655
> aggregate(cbind(ncases, ncontrols) ~ alcgp + tobgp, data = esoph, sum)
       alcgp    tobgp ncases ncontrols
1  0-39g/day 0-9g/day      9       252
2      40-79 0-9g/day     34       145
3     80-119 0-9g/day     19        42
4       120+ 0-9g/day     16         8
5  0-39g/day    10-19     10        74
6      40-79    10-19     17        68
7     80-119    10-19     19        30
8       120+    10-19     12         6
9  0-39g/day    20-29      5        37
10     40-79    20-29     15        47
11    80-119    20-29      6        10
12      120+    20-29      7         5
13 0-39g/day      30+      5        23
14     40-79      30+      9        20
15    80-119      30+      7         5
16      120+      30+     10         3
> 
> ## "complete cases" vs. "available cases"
> colSums(is.na(airquality))  # NAs in Ozone but not Temp
  Ozone Solar.R    Wind    Temp   Month     Day 
     37       7       0       0       0       0 
> ## the default is to summarize *complete cases*:
> aggregate(cbind(Ozone, Temp) ~ Month, data = airquality, FUN = mean)
  Month    Ozone     Temp
1     5 23.61538 66.73077
2     6 29.44444 78.22222
3     7 59.11538 83.88462
4     8 59.96154 83.96154
5     9 31.44828 76.89655
> ## to handle missing values *per variable*:
> aggregate(cbind(Ozone, Temp) ~ Month, data = airquality, FUN = mean,
+           na.action = na.pass, na.rm = TRUE)
  Month    Ozone     Temp
1     5 23.61538 65.54839
2     6 29.44444 79.10000
3     7 59.11538 83.90323
4     8 59.96154 83.96774
5     9 31.44828 76.90000
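> ## The Temp columns differ because the complete-case summary drops
> ## whole rows wherever Ozone is NA; to see in which months those rows
> ## fall (not run):
> ## Not run: table(airquality$Month[is.na(airquality$Ozone)])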
> 
> ## Dot notation:
> aggregate(. ~ Species, data = iris, mean)
     Species Sepal.Length Sepal.Width Petal.Length Petal.Width
1     setosa        5.006       3.428        1.462       0.246
2 versicolor        5.936       2.770        4.260       1.326
3  virginica        6.588       2.974        5.552       2.026
> aggregate(len ~ ., data = ToothGrowth, mean)
  supp dose   len
1   OJ  0.5 13.23
2   VC  0.5  7.98
3   OJ  1.0 22.70
4   VC  1.0 16.77
5   OJ  2.0 26.06
6   VC  2.0 26.14
> 
> ## Often followed by xtabs():
> ag <- aggregate(len ~ ., data = ToothGrowth, mean)
> xtabs(len ~ ., data = ag)
    dose
supp   0.5     1     2
  OJ 13.23 22.70 26.06
  VC  7.98 16.77 26.14
> 
> ## Formula interface via 'by' (for pipe operations)
> ToothGrowth |> aggregate(len ~ ., FUN = mean)
  supp dose   len
1   OJ  0.5 13.23
2   VC  0.5  7.98
3   OJ  1.0 22.70
4   VC  1.0 16.77
5   OJ  2.0 26.06
6   VC  2.0 26.14
> 
> ## Compute the average annual approval ratings for American presidents.
> aggregate(presidents, nfrequency = 1, FUN = mean)
Time Series:
Start = 1945 
End = 1974 
Frequency = 1 
 [1]    NA 47.00 51.00    NA 58.50 41.75 28.75    NA 67.00 65.00 72.75 72.25
[13] 65.25 52.25 61.50 62.75 76.00 71.50 64.75 72.75 66.50 52.25 45.00 41.00
[25] 61.25 58.00 50.50    NA 44.75 25.25
> ## Give the summer less weight.
> aggregate(presidents, nfrequency = 1,
+           FUN = weighted.mean, w = c(1, 1, 0.5, 1))
Time Series:
Start = 1945 
End = 1974 
Frequency = 1 
 [1]       NA 47.57143 50.57143       NA 58.71429 41.14286 28.28571       NA
 [9] 65.85714 64.14286 71.85714 73.00000 65.57143 52.85714 61.57143 63.00000
[17] 76.71429 72.85714 65.14286 73.28571 66.14286 51.71429 46.00000 41.85714
[25] 60.71429 57.57143 50.00000       NA 45.42857 25.42857
> 
> 
> 
> cleanEx()
> nameEx("alias")
> ### * alias
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: alias
> ### Title: Find Aliases (Dependencies) in a Model
> ### Aliases: alias alias.formula alias.lm
> ### Keywords: models
> 
> ### ** Examples
> 
> 
> cleanEx()
> nameEx("anova.glm")
> ### * anova.glm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: anova.glm
> ### Title: Analysis of Deviance for Generalized Linear Model Fits
> ### Aliases: anova.glm
> ### Keywords: models regression
> 
> ### ** Examples
> 
> ## --- Continuing the Example from  '?glm':
> ## Don't show: 
> require(utils)
> example("glm", echo = FALSE)
> ## End(Don't show)
> anova(glm.D93, test = FALSE)
Analysis of Deviance Table

Model: poisson, link: log

Response: counts

Terms added sequentially (first to last)


          Df Deviance Resid. Df Resid. Dev
NULL                          8    10.5814
outcome    2   5.4523         6     5.1291
treatment  2   0.0000         4     5.1291
> anova(glm.D93, test = "Cp")
Analysis of Deviance Table

Model: poisson, link: log

Response: counts

Terms added sequentially (first to last)


          Df Deviance Resid. Df Resid. Dev     Cp
NULL                          8    10.5814 12.581
outcome    2   5.4523         6     5.1291 11.129
treatment  2   0.0000         4     5.1291 15.129
> anova(glm.D93, test = "Chisq")
Analysis of Deviance Table

Model: poisson, link: log

Response: counts

Terms added sequentially (first to last)


          Df Deviance Resid. Df Resid. Dev Pr(>Chi)  
NULL                          8    10.5814           
outcome    2   5.4523         6     5.1291  0.06547 .
treatment  2   0.0000         4     5.1291  1.00000  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> glm.D93a <-
+    update(glm.D93, ~treatment*outcome) # equivalent to Pearson Chi-square
> anova(glm.D93, glm.D93a, test = "Rao")
Analysis of Deviance Table

Model 1: counts ~ outcome + treatment
Model 2: counts ~ treatment + outcome + treatment:outcome
  Resid. Df Resid. Dev Df Deviance    Rao Pr(>Chi)
1         4     5.1291                            
2         0     0.0000  4   5.1291 5.1732     0.27
> 
> 
> 
> cleanEx()
> nameEx("anova.lm")
> ### * anova.lm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: anova.lm
> ### Title: ANOVA for Linear Model Fits
> ### Aliases: anova.lm anova.lmlist
> ### Keywords: regression models
> 
> ### ** Examples
> 
> ## sequential table
> fit <- lm(sr ~ ., data = LifeCycleSavings)
> anova(fit)
Analysis of Variance Table

Response: sr
          Df Sum Sq Mean Sq F value    Pr(>F)    
pop15      1 204.12 204.118 14.1157 0.0004922 ***
pop75      1  53.34  53.343  3.6889 0.0611255 .  
dpi        1  12.40  12.401  0.8576 0.3593551    
ddpi       1  63.05  63.054  4.3605 0.0424711 *  
Residuals 45 650.71  14.460                      
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> ## same effect via separate models
> fit0 <- lm(sr ~ 1, data = LifeCycleSavings)
> fit1 <- update(fit0, . ~ . + pop15)
> fit2 <- update(fit1, . ~ . + pop75)
> fit3 <- update(fit2, . ~ . + dpi)
> fit4 <- update(fit3, . ~ . + ddpi)
> anova(fit0, fit1, fit2, fit3, fit4, test = "F")
Analysis of Variance Table

Model 1: sr ~ 1
Model 2: sr ~ pop15
Model 3: sr ~ pop15 + pop75
Model 4: sr ~ pop15 + pop75 + dpi
Model 5: sr ~ pop15 + pop75 + dpi + ddpi
  Res.Df    RSS Df Sum of Sq       F    Pr(>F)    
1     49 983.63                                   
2     48 779.51  1   204.118 14.1157 0.0004922 ***
3     47 726.17  1    53.343  3.6889 0.0611255 .  
4     46 713.77  1    12.401  0.8576 0.3593551    
5     45 650.71  1    63.054  4.3605 0.0424711 *  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> anova(fit4, fit2, fit0, test = "F") # unconventional order
Analysis of Variance Table

Model 1: sr ~ pop15 + pop75 + dpi + ddpi
Model 2: sr ~ pop15 + pop75
Model 3: sr ~ 1
  Res.Df    RSS Df Sum of Sq      F    Pr(>F)    
1     45 650.71                                  
2     47 726.17 -2   -75.455 2.6090 0.0847088 .  
3     49 983.63 -2  -257.460 8.9023 0.0005527 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> 
> 
> cleanEx()
> nameEx("anova.mlm")
> ### * anova.mlm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: anova.mlm
> ### Title: Comparisons between Multivariate Linear Models
> ### Aliases: anova.mlm
> ### Keywords: regression models multivariate
> 
> ### ** Examples
> 
> require(graphics)
> utils::example(SSD) # Brings in the mlmfit and reacttime objects

SSD> # Lifted from Baron+Li:
SSD> # "Notes on the use of R for psychology experiments and questionnaires"
SSD> # Maxwell and Delaney, p. 497
SSD> reacttime <- matrix(c(
SSD+ 420, 420, 480, 480, 600, 780,
SSD+ 420, 480, 480, 360, 480, 600,
SSD+ 480, 480, 540, 660, 780, 780,
SSD+ 420, 540, 540, 480, 780, 900,
SSD+ 540, 660, 540, 480, 660, 720,
SSD+ 360, 420, 360, 360, 480, 540,
SSD+ 480, 480, 600, 540, 720, 840,
SSD+ 480, 600, 660, 540, 720, 900,
SSD+ 540, 600, 540, 480, 720, 780,
SSD+ 480, 420, 540, 540, 660, 780),
SSD+ ncol = 6, byrow = TRUE,
SSD+ dimnames = list(subj = 1:10,
SSD+               cond = c("deg0NA", "deg4NA", "deg8NA",
SSD+                        "deg0NP", "deg4NP", "deg8NP")))

SSD> mlmfit <- lm(reacttime ~ 1)

SSD> SSD(mlmfit)
$SSD
        cond
cond     deg0NA deg4NA deg8NA deg0NP deg4NP deg8NP
  deg0NA  29160  30600  26640  23760  32400  25560
  deg4NA  30600  66600  32400   7200  36000  30600
  deg8NA  26640  32400  56160  41040  57600  69840
  deg0NP  23760   7200  41040  70560  72000  63360
  deg4NP  32400  36000  57600  72000 108000 100800
  deg8NP  25560  30600  69840  63360 100800 122760

$call
lm(formula = reacttime ~ 1)

$df
[1] 9

attr(,"class")
[1] "SSD"

SSD> estVar(mlmfit)
        cond
cond     deg0NA deg4NA deg8NA deg0NP deg4NP deg8NP
  deg0NA   3240   3400   2960   2640   3600   2840
  deg4NA   3400   7400   3600    800   4000   3400
  deg8NA   2960   3600   6240   4560   6400   7760
  deg0NP   2640    800   4560   7840   8000   7040
  deg4NP   3600   4000   6400   8000  12000  11200
  deg8NP   2840   3400   7760   7040  11200  13640
> 
> mlmfit0 <- update(mlmfit, ~0)
> 
> ### Traditional tests of intrasubj. contrasts
> ## Using MANOVA techniques on contrasts:
> anova(mlmfit, mlmfit0, X = ~1)
Analysis of Variance Table

Model 1: reacttime ~ 1
Model 2: reacttime ~ 1 - 1

Contrasts orthogonal to
~1

  Res.Df Df Gen.var. Pillai approx F num Df den Df   Pr(>F)   
1      9      1249.6                                          
2     10  1   2013.2 0.9456   17.381      5      5 0.003534 **
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> ## Assuming sphericity
> anova(mlmfit, mlmfit0, X = ~1, test = "Spherical")
Analysis of Variance Table

Model 1: reacttime ~ 1
Model 2: reacttime ~ 1 - 1

Contrasts orthogonal to
~1

Greenhouse-Geisser epsilon: 0.4855
Huynh-Feldt epsilon:        0.6778

  Res.Df Df Gen.var.      F num Df den Df     Pr(>F)    G-G Pr    H-F Pr
1      9      1249.6                                                    
2     10  1   2013.2 38.028      5     45 4.4711e-15 2.532e-08 7.393e-11
> 
> 
> ### tests using intra-subject 3x2 design
> idata <- data.frame(deg = gl(3, 1, 6, labels = c(0, 4, 8)),
+                     noise = gl(2, 3, 6, labels = c("A", "P")))
> 
> anova(mlmfit, mlmfit0, X = ~ deg + noise,
+       idata = idata, test = "Spherical")
Analysis of Variance Table

Model 1: reacttime ~ 1
Model 2: reacttime ~ 1 - 1

Contrasts orthogonal to
~deg + noise

Greenhouse-Geisser epsilon: 0.904
Huynh-Feldt epsilon:        1.118

  Res.Df Df Gen.var.     F num Df den Df     Pr(>F)     G-G Pr     H-F Pr
1      9      316.58                                                     
2     10  1   996.34 45.31      2     18 9.4241e-08 3.4539e-07 9.4241e-08
> anova(mlmfit, mlmfit0, M = ~ deg + noise, X = ~ noise,
+       idata = idata, test = "Spherical" )
Analysis of Variance Table

Model 1: reacttime ~ 1
Model 2: reacttime ~ 1 - 1

Contrasts orthogonal to
~noise


Contrasts spanned by
~deg + noise

Greenhouse-Geisser epsilon: 0.9616
Huynh-Feldt epsilon:        1.2176

  Res.Df Df Gen.var.      F num Df den Df     Pr(>F)     G-G Pr     H-F Pr
1      9      1007.0                                                      
2     10  1   2703.2 40.719      2     18 2.0868e-07 3.4017e-07 2.0868e-07
> anova(mlmfit, mlmfit0, M = ~ deg + noise, X = ~ deg,
+       idata = idata, test = "Spherical" )
Analysis of Variance Table

Model 1: reacttime ~ 1
Model 2: reacttime ~ 1 - 1

Contrasts orthogonal to
~deg


Contrasts spanned by
~deg + noise

Greenhouse-Geisser epsilon: 1
Huynh-Feldt epsilon:        1

  Res.Df Df Gen.var.      F num Df den Df     Pr(>F)     G-G Pr     H-F Pr
1      9        1410                                                      
2     10  1     6030 33.766      1      9 0.00025597 0.00025597 0.00025597
> 
> f <- factor(rep(1:2, 5)) # bogus, just for illustration
> mlmfit2 <- update(mlmfit, ~f)
> anova(mlmfit2, mlmfit, mlmfit0, X = ~1, test = "Spherical")
Analysis of Variance Table

Model 1: reacttime ~ f
Model 2: reacttime ~ 1
Model 3: reacttime ~ 1 - 1

Contrasts orthogonal to
~1

Greenhouse-Geisser epsilon: 0.4691
Huynh-Feldt epsilon:        0.6758

  Res.Df Df Gen.var.       F num Df den Df  Pr(>F)  G-G Pr  H-F Pr
1      8      1337.3                                              
2      9  1   1249.6  0.2743      5     40 0.92452 0.79608 0.86456
3     10  1   2013.2 34.9615      5     40 0.00000 0.00000 0.00000
> anova(mlmfit2, X = ~1, test = "Spherical")
Analysis of Variance Table


Contrasts orthogonal to
~1

Greenhouse-Geisser epsilon: 0.4691
Huynh-Feldt epsilon:        0.6758

            Df       F num Df den Df  Pr(>F)  G-G Pr  H-F Pr
(Intercept)  1 34.9615      5     40 0.00000 0.00000 0.00000
f            1  0.2743      5     40 0.92452 0.79608 0.86456
Residuals    8                                              
> # one-model form, equivalent to the previous call
> 
> ### There seems to be a strong interaction in these data
> plot(colMeans(reacttime))
> 
> 
> 
> cleanEx()
> nameEx("ansari.test")
> ### * ansari.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ansari.test
> ### Title: Ansari-Bradley Test
> ### Aliases: ansari.test ansari.test.default ansari.test.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Hollander & Wolfe (1973, p. 86f):
> ## Serum iron determination using Hyland control sera
> ramsay <- c(111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
+             101, 96, 97, 102, 107, 113, 116, 113, 110, 98)
> jung.parekh <- c(107, 108, 106, 98, 105, 103, 110, 105, 104,
+             100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99)
> ansari.test(ramsay, jung.parekh)
Warning in ansari.test.default(ramsay, jung.parekh) :
  cannot compute exact p-value with ties

	Ansari-Bradley test

data:  ramsay and jung.parekh
AB = 185.5, p-value = 0.1815
alternative hypothesis: true ratio of scales is not equal to 1

> 
> ansari.test(rnorm(10), rnorm(10, 0, 2), conf.int = TRUE)

	Ansari-Bradley test

data:  rnorm(10) and rnorm(10, 0, 2)
AB = 69, p-value = 0.03831
alternative hypothesis: true ratio of scales is not equal to 1
95 percent confidence interval:
 0.1852324 0.9712857
sample estimates:
ratio of scales 
      0.4007458 

> 
> ## try more points - this failed in R 2.4.1
> ansari.test(rnorm(100), rnorm(100, 0, 2), conf.int = TRUE)

	Ansari-Bradley test

data:  rnorm(100) and rnorm(100, 0, 2)
AB = 6180, p-value = 3.347e-08
alternative hypothesis: true ratio of scales is not equal to 1
95 percent confidence interval:
 0.3330596 0.5693532
sample estimates:
ratio of scales 
      0.4346784 

> 
> 
> 
> cleanEx()
> nameEx("aov")
> ### * aov
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: aov
> ### Title: Fit an Analysis of Variance Model
> ### Aliases: aov print.aov print.aovlist Error
> ### Keywords: models regression
> 
> ### ** Examples
> 
> ## From Venables and Ripley (2002) p.165.
> 
> ## Set orthogonal contrasts.
> op <- options(contrasts = c("contr.helmert", "contr.poly"))
> ( npk.aov <- aov(yield ~ block + N*P*K, npk) )
Call:
   aov(formula = yield ~ block + N * P * K, data = npk)

Terms:
                   block        N        P        K      N:P      N:K      P:K
Sum of Squares  343.2950 189.2817   8.4017  95.2017  21.2817  33.1350   0.4817
Deg. of Freedom        5        1        1        1        1        1        1
                Residuals
Sum of Squares   185.2867
Deg. of Freedom        12

Residual standard error: 3.929447
1 out of 13 effects not estimable
Estimated effects are balanced
> coefficients(npk.aov)
(Intercept)      block1      block2      block3      block4      block5 
 54.8750000   1.7125000   1.6791667  -1.8229167  -1.0137500   0.2950000 
         N1          P1          K1       N1:P1       N1:K1       P1:K1 
  2.8083333  -0.5916667  -1.9916667  -0.9416667  -1.1750000   0.1416667 
> 
> ## To show the effect of re-ordering terms, contrast the two fits:
> aov(yield ~ block + N * P + K, npk)
Call:
   aov(formula = yield ~ block + N * P + K, data = npk)

Terms:
                   block        N        P        K      N:P Residuals
Sum of Squares  343.2950 189.2817   8.4017  95.2017  21.2817  218.9033
Deg. of Freedom        5        1        1        1        1        14

Residual standard error: 3.954232
Estimated effects are balanced
> aov(terms(yield ~ block + N * P + K, keep.order = TRUE), npk)
Call:
   aov(formula = terms(yield ~ block + N * P + K, keep.order = TRUE), 
    data = npk)

Terms:
                   block        N        P      N:P        K Residuals
Sum of Squares  343.2950 189.2817   8.4017  21.2817  95.2017  218.9033
Deg. of Freedom        5        1        1        1        1        14

Residual standard error: 3.954232
Estimated effects are balanced
> 
> 
> ## as a test, not particularly sensible statistically
> npk.aovE <- aov(yield ~  N*P*K + Error(block), npk)
> npk.aovE

Call:
aov(formula = yield ~ N * P * K + Error(block), data = npk)

Grand Mean: 54.875

Stratum 1: block

Terms:
                    N:P:K Residuals
Sum of Squares   37.00167 306.29333
Deg. of Freedom         1         4

Residual standard error: 8.750619
Estimated effects are balanced

Stratum 2: Within

Terms:
                        N         P         K       N:P       N:K       P:K
Sum of Squares  189.28167   8.40167  95.20167  21.28167  33.13500   0.48167
Deg. of Freedom         1         1         1         1         1         1
                Residuals
Sum of Squares  185.28667
Deg. of Freedom        12

Residual standard error: 3.929447
Estimated effects are balanced
> ## IGNORE_RDIFF_BEGIN
> summary(npk.aovE)

Error: block
          Df Sum Sq Mean Sq F value Pr(>F)
N:P:K      1   37.0   37.00   0.483  0.525
Residuals  4  306.3   76.57               

Error: Within
          Df Sum Sq Mean Sq F value  Pr(>F)   
N          1 189.28  189.28  12.259 0.00437 **
P          1   8.40    8.40   0.544 0.47490   
K          1  95.20   95.20   6.166 0.02880 * 
N:P        1  21.28   21.28   1.378 0.26317   
N:K        1  33.13   33.13   2.146 0.16865   
P:K        1   0.48    0.48   0.031 0.86275   
Residuals 12 185.29   15.44                   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> ## IGNORE_RDIFF_END
> options(op)  # reset to previous
> 
> 
> 
> base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly"))
> cleanEx()
> nameEx("approxfun")
> ### * approxfun
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: approxfun
> ### Title: Interpolation Functions
> ### Aliases: approx approxfun
> ### Keywords: arith dplot
> 
> ### ** Examples
> 
> require(graphics)
> 
> x <- 1:10
> y <- rnorm(10)
> par(mfrow = c(2,1))
> plot(x, y, main = "approx(.) and approxfun(.)")
> points(approx(x, y), col = 2, pch = "*")
> points(approx(x, y, method = "constant"), col = 4, pch = "*")
> 
> f <- approxfun(x, y)
> curve(f(x), 0, 11, col = "green2")
> points(x, y)
> is.function(fc <- approxfun(x, y, method = "const")) # TRUE
[1] TRUE
> curve(fc(x), 0, 10, col = "darkblue", add = TRUE)
> ## different extrapolation on the left and right sides:
> plot(approxfun(x, y, rule = 2:1), 0, 11,
+      col = "tomato", add = TRUE, lty = 3, lwd = 2)
> 
> ### Treatment of NA's -- they are kept if na.rm = FALSE:
> 
> xn <- 1:4
> yn <- c(1,NA,3:4)
> xout <- (1:9)/2
> ## Default behavior (na.rm = TRUE): NA's omitted; extrapolation gives NA
> data.frame(approx(xn,yn, xout))
    x   y
1 0.5  NA
2 1.0 1.0
3 1.5 1.5
4 2.0 2.0
5 2.5 2.5
6 3.0 3.0
7 3.5 3.5
8 4.0 4.0
9 4.5  NA
> data.frame(approx(xn,yn, xout, rule = 2))# -> *constant* extrapolation
    x   y
1 0.5 1.0
2 1.0 1.0
3 1.5 1.5
4 2.0 2.0
5 2.5 2.5
6 3.0 3.0
7 3.5 3.5
8 4.0 4.0
9 4.5 4.0
> ## New (2019-2020)  na.rm = FALSE: NA's are "kept"
> data.frame(approx(xn,yn, xout, na.rm=FALSE, rule = 2))
    x   y
1 0.5 1.0
2 1.0 1.0
3 1.5  NA
4 2.0  NA
5 2.5  NA
6 3.0 3.0
7 3.5 3.5
8 4.0 4.0
9 4.5 4.0
> data.frame(approx(xn,yn, xout, na.rm=FALSE, rule = 2, method="constant"))
    x  y
1 0.5  1
2 1.0  1
3 1.5  1
4 2.0 NA
5 2.5 NA
6 3.0  3
7 3.5  3
8 4.0  4
9 4.5  4
> 
> ## NA's in x[] are not allowed:
> stopifnot(inherits( try( approx(yn,yn, na.rm=FALSE) ), "try-error"))
Error in approx(yn, yn, na.rm = FALSE) : 
  approx(x,y, .., na.rm=FALSE): NA values in x are not allowed
> 
> ## Give a nice overview of all possibilities  rule * method * na.rm :
> ##             -----------------------------  ====   ======   =====
> ## extrapolations "N":= NA;   "C":= Constant :
> rules <- list(N=1, C=2, NC=1:2, CN=2:1)
> methods <- c("constant","linear")
> ry <- sapply(rules, function(R) {
+        sapply(methods, function(M)
+         sapply(setNames(,c(TRUE,FALSE)), function(na.)
+                  approx(xn, yn, xout=xout, method=M, rule=R, na.rm=na.)$y),
+         simplify="array")
+    }, simplify="array")
> names(dimnames(ry)) <- c("x = ", "na.rm", "method", "rule")
> dimnames(ry)[[1]] <- format(xout)
> ftable(aperm(ry, 4:1)) # --> (4 * 2 * 2) x length(xout)  =  16 x 9 matrix
                    x =  0.5 1.0 1.5 2.0 2.5 3.0 3.5 4.0 4.5
rule method   na.rm                                         
N    constant TRUE        NA 1.0 1.0 1.0 1.0 3.0 3.0 4.0  NA
              FALSE       NA 1.0 1.0  NA  NA 3.0 3.0 4.0  NA
     linear   TRUE        NA 1.0 1.5 2.0 2.5 3.0 3.5 4.0  NA
              FALSE       NA 1.0  NA  NA  NA 3.0 3.5 4.0  NA
C    constant TRUE       1.0 1.0 1.0 1.0 1.0 3.0 3.0 4.0 4.0
              FALSE      1.0 1.0 1.0  NA  NA 3.0 3.0 4.0 4.0
     linear   TRUE       1.0 1.0 1.5 2.0 2.5 3.0 3.5 4.0 4.0
              FALSE      1.0 1.0  NA  NA  NA 3.0 3.5 4.0 4.0
NC   constant TRUE        NA 1.0 1.0 1.0 1.0 3.0 3.0 4.0 4.0
              FALSE       NA 1.0 1.0  NA  NA 3.0 3.0 4.0 4.0
     linear   TRUE        NA 1.0 1.5 2.0 2.5 3.0 3.5 4.0 4.0
              FALSE       NA 1.0  NA  NA  NA 3.0 3.5 4.0 4.0
CN   constant TRUE       1.0 1.0 1.0 1.0 1.0 3.0 3.0 4.0  NA
              FALSE      1.0 1.0 1.0  NA  NA 3.0 3.0 4.0  NA
     linear   TRUE       1.0 1.0 1.5 2.0 2.5 3.0 3.5 4.0  NA
              FALSE      1.0 1.0  NA  NA  NA 3.0 3.5 4.0  NA
> ## Don't show: 
> ryna <- ry
> ryna[is.na(ryna)] <- NA ## approx may turn NAs into NaNs
> stopifnot(exprs = {
+  identical(unname(ryna),
+    array(c(NA, 1, 1,   1, 1, 3, 3, 4, NA,      NA, 1,  1, NA, NA, 3, 3, 4, NA,
+            NA, 1, 1.5, 2, 2.5, 3, 3.5, 4, NA,  NA, 1, NA, NA, NA, 3, 3.5, 4, NA,
+             1, 1, 1,   1, 1, 3, 3, 4, 4,        1, 1,  1, NA, NA, 3, 3, 4, 4,
+             1, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4,    1, 1, NA, NA, NA, 3, 3.5, 4, 4,
+            NA, 1, 1,   1, 1, 3, 3, 4, 4,       NA, 1,  1, NA, NA, 3, 3, 4, 4,
+            NA, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4,   NA, 1, NA, NA, NA, 3, 3.5, 4, 4,
+             1, 1, 1,   1, 1, 3, 3, 4, NA,       1, 1,  1, NA, NA, 3, 3, 4, NA,
+             1, 1, 1.5, 2, 2.5, 3, 3.5, 4, NA,   1, 1, NA, NA, NA, 3, 3.5, 4, NA),
+          dim = c(9L, 2L, 2L, 4L)))
+  identical(approxfun(xn,yn, method="constant", rule=2, na.rm=FALSE)(xout),
+             as.vector(ry[,"FALSE", "constant","C"]))
+  identical(approxfun(xn,yn, method="linear", rule=2:1, na.rm=FALSE)(xout),
+             as.vector(ry[,"FALSE", "linear", "CN"]))
+ })
> ## End(Don't show)
> 
> ## Show treatment of 'ties':
> 
> x <- c(2,2:4,4,4,5,5,7,7,7)
> y <- c(1:6, 5:4, 3:1)
> (amy <- approx(x, y, xout = x)$y) # warning, can be avoided by specifying 'ties=':
Warning in regularize.values(x, y, ties, missing(ties), na.rm = na.rm) :
  collapsing to unique 'x' values
 [1] 1.5 1.5 3.0 5.0 5.0 5.0 4.5 4.5 2.0 2.0 2.0
> op <- options(warn=2) # turn warnings into errors
> stopifnot(identical(amy, approx(x, y, xout = x, ties=mean)$y))
> (ay <- approx(x, y, xout = x, ties = "ordered")$y)
 [1] 2 2 3 6 6 6 4 4 1 1 1
> stopifnot(amy == c(1.5,1.5, 3, 5,5,5, 4.5,4.5, 2,2,2),
+           ay  == c(2, 2,    3, 6,6,6, 4, 4,    1,1,1))
> approx(x, y, xout = x, ties = min)$y
 [1] 1 1 3 4 4 4 4 4 1 1 1
> approx(x, y, xout = x, ties = max)$y
 [1] 2 2 3 6 6 6 5 5 3 3 3
> options(op) # restore the previous 'warn' level
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("ar")
> ### * ar
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ar
> ### Title: Fit Autoregressive Models to Time Series
> ### Aliases: ar ar.burg ar.burg.default ar.burg.mts ar.yw ar.yw.default
> ###   ar.yw.mts ar.mle print.ar predict.ar
> ### Keywords: ts
> 
> ### ** Examples
> 
> ar(lh)

Call:
ar(x = lh)

Coefficients:
      1        2        3  
 0.6534  -0.0636  -0.2269  

Order selected 3  sigma^2 estimated as  0.1959
> ar(lh, method = "burg")

Call:
ar(x = lh, method = "burg")

Coefficients:
      1        2        3  
 0.6588  -0.0608  -0.2234  

Order selected 3  sigma^2 estimated as  0.1786
> ar(lh, method = "ols")

Call:
ar(x = lh, method = "ols")

Coefficients:
    1  
0.586  

Intercept: 0.006234 (0.06551) 

Order selected 1  sigma^2 estimated as  0.2016
> ar(lh, FALSE, 4) # fit ar(4)

Call:
ar(x = lh, aic = FALSE, order.max = 4)

Coefficients:
      1        2        3        4  
 0.6767  -0.0571  -0.2941   0.1028  

Order selected 4  sigma^2 estimated as  0.1983
> 
> (sunspot.ar <- ar(sunspot.year))

Call:
ar(x = sunspot.year)

Coefficients:
      1        2        3        4        5        6        7        8  
 1.1305  -0.3524  -0.1745   0.1403  -0.1358   0.0963  -0.0556   0.0076  
      9  
 0.1941  

Order selected 9  sigma^2 estimated as  267.5
> predict(sunspot.ar, n.ahead = 25)
$pred
Time Series:
Start = 1989 
End = 2013 
Frequency = 1 
 [1] 135.25933 148.09051 133.98476 106.61344  71.21921  40.84057  18.70100
 [8]  11.52416  27.24208  56.99888  87.86705 107.62926 111.05437  98.05484
[15]  74.84085  48.80128  27.65441  18.15075  23.15355  40.04723  61.95906
[22]  80.79092  90.11420  87.44131  74.42284

$se
Time Series:
Start = 1989 
End = 2013 
Frequency = 1 
 [1] 16.35519 24.68467 28.95653 29.97401 30.07714 30.15629 30.35971 30.58793
 [9] 30.71100 30.74276 31.42565 32.96467 34.48910 35.33601 35.51890 35.52034
[17] 35.65505 35.90628 36.07084 36.08139 36.16818 36.56324 37.16527 37.64820
[25] 37.83954

> ## try the other methods too
> 
> ar(ts.union(BJsales, BJsales.lead))

Call:
ar(x = ts.union(BJsales, BJsales.lead))

$ar
, , 1

             BJsales BJsales.lead
BJsales       0.9499       0.8222
BJsales.lead  0.0276       0.4970

, , 2

              BJsales BJsales.lead
BJsales       0.02041       -1.133
BJsales.lead -0.02193        0.294

, , 3

               BJsales BJsales.lead
BJsales      -0.186490       3.9415
BJsales.lead -0.002946       0.1264


$var.pred
             BJsales BJsales.lead
BJsales      13.9431       0.7733
BJsales.lead  0.7733       0.1231

> ## Burg is quite different here, as is OLS (see ar.ols)
> ar(ts.union(BJsales, BJsales.lead), method = "burg")

Call:
ar(x = ts.union(BJsales, BJsales.lead), method = "burg")

$ar
, , 1

             BJsales BJsales.lead
BJsales      1.21197      0.07312
BJsales.lead 0.07411      0.45684

, , 2

              BJsales BJsales.lead
BJsales      -0.26022      -0.1120
BJsales.lead -0.06904       0.3111

, , 3

              BJsales BJsales.lead
BJsales      -0.01754      3.93591
BJsales.lead  0.01792      0.09632

, , 4

              BJsales BJsales.lead
BJsales      -0.07746     -1.33836
BJsales.lead -0.01158     -0.09118


$var.pred
             BJsales BJsales.lead
BJsales      0.38426      0.01315
BJsales.lead 0.01315      0.07657

> 
> 
> 
> cleanEx()
> nameEx("ar.ols")
> ### * ar.ols
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ar.ols
> ### Title: Fit Autoregressive Models to Time Series by OLS
> ### Aliases: ar.ols
> ### Keywords: ts
> 
> ### ** Examples
> 
> ar(lh, method = "burg")

Call:
ar(x = lh, method = "burg")

Coefficients:
      1        2        3  
 0.6588  -0.0608  -0.2234  

Order selected 3  sigma^2 estimated as  0.1786
> ar.ols(lh)

Call:
ar.ols(x = lh)

Coefficients:
    1  
0.586  

Intercept: 0.006234 (0.06551) 

Order selected 1  sigma^2 estimated as  0.2016
> ar.ols(lh, FALSE, 4) # fit ar(4)

Call:
ar.ols(x = lh, aic = FALSE, order.max = 4)

Coefficients:
      1        2        3        4  
 0.6761  -0.0571  -0.3001   0.0967  

Intercept: 0.0004346 (0.06642) 

Order selected 4  sigma^2 estimated as  0.1924
> 
> ar.ols(ts.union(BJsales, BJsales.lead))

Call:
ar.ols(x = ts.union(BJsales, BJsales.lead))

$ar
, , 1

             BJsales BJsales.lead
BJsales      0.40408     -0.05144
BJsales.lead 0.07348      0.54524

, , 2

             BJsales BJsales.lead
BJsales       0.2367      0.05899
BJsales.lead -0.1224      0.28690

, , 3

             BJsales BJsales.lead
BJsales      0.09673       4.7413
BJsales.lead 0.10469       0.0701

, , 4

              BJsales BJsales.lead
BJsales      0.048758       1.5671
BJsales.lead 0.003482      -0.1838

, , 5

              BJsales BJsales.lead
BJsales       0.23307      -0.1574
BJsales.lead -0.02707       0.2623

, , 6

              BJsales BJsales.lead
BJsales       0.07494      -0.4313
BJsales.lead -0.08729      -0.2166

, , 7

              BJsales BJsales.lead
BJsales      -0.09946      -0.6233
BJsales.lead  0.27123      -0.2584

, , 8

              BJsales BJsales.lead
BJsales       0.06725     -1.36469
BJsales.lead -0.21501      0.06115

, , 9

              BJsales BJsales.lead
BJsales      -0.11531       -1.464
BJsales.lead  0.03536        0.270

, , 10

               BJsales BJsales.lead
BJsales      -0.002037      -0.6472
BJsales.lead  0.001284      -1.0667

, , 11

              BJsales BJsales.lead
BJsales      0.004065      -0.7009
BJsales.lead 0.002098       0.4890


$x.intercept
     BJsales BJsales.lead 
     0.19630     -0.07022 

$var.pred
                BJsales BJsales.lead
BJsales       0.0400996   -0.0009311
BJsales.lead -0.0009311    0.0682137

> 
> x <- diff(log(EuStockMarkets))
> ar.ols(x, order.max = 6, demean = FALSE, intercept = TRUE)

Call:
ar.ols(x = x, order.max = 6, demean = FALSE, intercept = TRUE)

$ar
, , 1

           DAX       SMI       CAC    FTSE
DAX   0.004560 -0.095781  0.039975 0.04856
SMI  -0.009204 -0.007142  0.037758 0.06826
CAC  -0.026624 -0.113688  0.063807 0.09154
FTSE -0.010299 -0.089246 -0.003195 0.16409


$x.intercept
      DAX       SMI       CAC      FTSE 
0.0006941 0.0007813 0.0004866 0.0004388 

$var.pred
           DAX       SMI       CAC      FTSE
DAX  1.056e-04 6.683e-05 8.274e-05 5.192e-05
SMI  6.683e-05 8.496e-05 6.252e-05 4.254e-05
CAC  8.274e-05 6.252e-05 1.207e-04 5.615e-05
FTSE 5.192e-05 4.254e-05 5.615e-05 6.224e-05

> 
> 
> 
> cleanEx()
> nameEx("arima")
> ### * arima
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: arima
> ### Title: ARIMA Modelling of Time Series
> ### Aliases: arima
> ### Keywords: ts
> 
> ### ** Examples
> 
> arima(lh, order = c(1,0,0))

Call:
arima(x = lh, order = c(1, 0, 0))

Coefficients:
         ar1  intercept
      0.5739     2.4133
s.e.  0.1161     0.1466

sigma^2 estimated as 0.1975:  log likelihood = -29.38,  aic = 64.76
> arima(lh, order = c(3,0,0))

Call:
arima(x = lh, order = c(3, 0, 0))

Coefficients:
         ar1      ar2      ar3  intercept
      0.6448  -0.0634  -0.2198     2.3931
s.e.  0.1394   0.1668   0.1421     0.0963

sigma^2 estimated as 0.1787:  log likelihood = -27.09,  aic = 64.18
> arima(lh, order = c(1,0,1))

Call:
arima(x = lh, order = c(1, 0, 1))

Coefficients:
         ar1     ma1  intercept
      0.4522  0.1982     2.4101
s.e.  0.1769  0.1705     0.1358

sigma^2 estimated as 0.1923:  log likelihood = -28.76,  aic = 65.52
> 
> arima(lh, order = c(3,0,0), method = "CSS")

Call:
arima(x = lh, order = c(3, 0, 0), method = "CSS")

Coefficients:
         ar1      ar2      ar3  intercept
      0.6578  -0.0658  -0.2348     2.3918
s.e.  0.1414   0.1702   0.1473     0.0983

sigma^2 estimated as 0.1905:  part log likelihood = -28.31
> 
> arima(USAccDeaths, order = c(0,1,1), seasonal = list(order = c(0,1,1)))

Call:
arima(x = USAccDeaths, order = c(0, 1, 1), seasonal = list(order = c(0, 1, 1)))

Coefficients:
          ma1     sma1
      -0.4303  -0.5528
s.e.   0.1228   0.1784

sigma^2 estimated as 99347:  log likelihood = -425.44,  aic = 856.88
> arima(USAccDeaths, order = c(0,1,1), seasonal = list(order = c(0,1,1)),
+       method = "CSS") # drops first 13 observations.

Call:
arima(x = USAccDeaths, order = c(0, 1, 1), seasonal = list(order = c(0, 1, 1)), 
    method = "CSS")

Coefficients:
          ma1     sma1
      -0.3732  -0.4549
s.e.   0.1366   0.1436

sigma^2 estimated as 110330:  part log likelihood = -426.25
> # for a model with as few years as this, we want full ML
> 
> arima(LakeHuron, order = c(2,0,0), xreg = time(LakeHuron) - 1920)

Call:
arima(x = LakeHuron, order = c(2, 0, 0), xreg = time(LakeHuron) - 1920)

Coefficients:
         ar1      ar2  intercept  time(LakeHuron) - 1920
      1.0048  -0.2913   579.0993                 -0.0216
s.e.  0.0976   0.1004     0.2370                  0.0081

sigma^2 estimated as 0.4566:  log likelihood = -101.2,  aic = 212.4
> 
> ## presidents contains NAs
> ## graphs in example(acf) suggest order 1 or 3
> require(graphics)
> (fit1 <- arima(presidents, c(1, 0, 0)))

Call:
arima(x = presidents, order = c(1, 0, 0))

Coefficients:
         ar1  intercept
      0.8242    56.1505
s.e.  0.0555     4.6434

sigma^2 estimated as 85.47:  log likelihood = -416.89,  aic = 839.78
> nobs(fit1)
[1] 114
> tsdiag(fit1)
> (fit3 <- arima(presidents, c(3, 0, 0)))  # smaller AIC

Call:
arima(x = presidents, order = c(3, 0, 0))

Coefficients:
         ar1     ar2      ar3  intercept
      0.7496  0.2523  -0.1890    56.2223
s.e.  0.0936  0.1140   0.0946     4.2845

sigma^2 estimated as 81.12:  log likelihood = -414.08,  aic = 838.16
> tsdiag(fit3)
> BIC(fit1, fit3)
     df      BIC
fit1  3 847.9931
fit3  5 851.8449
> ## compare a whole set of models; BIC() would choose the smallest model (fit1; see below)
> AIC(fit1, arima(presidents, c(2,0,0)),
+           arima(presidents, c(2,0,1)), # <- chosen (barely) by AIC
+     fit3, arima(presidents, c(3,0,1)))
                              df      AIC
fit1                           3 839.7845
arima(presidents, c(2, 0, 0))  4 840.0458
arima(presidents, c(2, 0, 1))  5 838.1272
fit3                           5 838.1639
arima(presidents, c(3, 0, 1))  6 838.8124
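> ## The analogous BIC comparison (not run): with n = 114, BIC's per-df
> ## penalty log(114) exceeds AIC's 2, so the smallest model, fit1,
> ## should come out lowest (cf. BIC(fit1, fit3) above):
> ## Not run: BIC(fit1, arima(presidents, c(2,0,0)), arima(presidents, c(2,0,1)), fit3, arima(presidents, c(3,0,1)))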
> 
> ## An example of using the  'fixed'  argument:
> ## Note that the period of the seasonal component is taken to be
> ## frequency(presidents), i.e. 4.
> (fitSfx <- arima(presidents, order=c(2,0,1), seasonal=c(1,0,0),
+                  fixed=c(NA, NA, 0.5, -0.1, 50), transform.pars=FALSE))

Call:
arima(x = presidents, order = c(2, 0, 1), seasonal = c(1, 0, 0), transform.pars = FALSE, 
    fixed = c(NA, NA, 0.5, -0.1, 50))

Coefficients:
         ar1     ar2  ma1  sar1  intercept
      0.2047  0.6269  0.5  -0.1         50
s.e.  0.0734  0.0747  0.0   0.0          0

sigma^2 estimated as 84.01:  log likelihood = -416.07,  aic = 838.14
> ## The partly-fixed & smaller model seems better (as we "knew too much"):
> AIC(fitSfx, arima(presidents, order=c(2,0,1), seasonal=c(1,0,0)))
                                                             df      AIC
fitSfx                                                        3 838.1406
arima(presidents, order = c(2, 0, 1), seasonal = c(1, 0, 0))  6 839.7095
> 
> ## An example of ARIMA forecasting:
> predict(fit3, 3)
$pred
         Qtr1     Qtr2     Qtr3
1975 29.84194 34.41014 39.30815

$se
         Qtr1     Qtr2     Qtr3
1975  9.00655 11.25606 13.43389

> 
> 
> 
> cleanEx()
> nameEx("arima.sim")
> ### * arima.sim
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: arima.sim
> ### Title: Simulate from an ARIMA Model
> ### Aliases: arima.sim
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> arima.sim(n = 63, list(ar = c(0.8897, -0.4858), ma = c(-0.2279, 0.2488)),
+           sd = sqrt(0.1796))
Time Series:
Start = 1 
End = 63 
Frequency = 1 
 [1]  0.55638574  0.24800500 -0.81744783 -0.38508849 -0.23890454 -0.02072714
 [7] -0.51654590 -0.52651638 -0.14927781  0.60798558  0.48272646  0.45160066
[13]  0.09619832 -0.67131480 -0.69254708 -0.56224691 -0.19460368  0.53033791
[19]  0.77731125  0.40648873 -0.02697856  0.08101569  0.22706682 -0.10950180
[25] -0.38234797 -0.13676254  0.27995530  0.23211869  0.53580489  0.43571147
[31] -0.07756004 -0.03502285 -0.56957457  0.26261214  1.09213953  0.64829971
[37] -0.15198589 -0.14659775 -0.27894630  0.91394089  0.68582187  0.71550492
[43]  0.24452551 -0.37501019 -0.29768875 -0.94420587  0.11988146  0.29844068
[49]  1.26774646  0.99077203  0.14790214  0.02779843 -0.57682386 -0.90337297
[55] -0.37740926 -0.24511724  0.03927715  0.13868087 -0.15259447 -0.37935616
[61] -0.32790595  0.34491331 -0.30754547
> # mildly long-tailed
> arima.sim(n = 63, list(ar = c(0.8897, -0.4858), ma = c(-0.2279, 0.2488)),
+           rand.gen = function(n, ...) sqrt(0.1796) * rt(n, df = 5))
Time Series:
Start = 1 
End = 63 
Frequency = 1 
 [1] -0.08101433 -0.37596592 -1.05656365 -0.99217317 -0.15199924 -0.06140144
 [7] -0.55852200 -0.54700937 -0.72098523 -0.98045523 -0.80880380 -0.61217798
[13] -0.66268921 -0.29302949 -0.83022714 -0.08803618 -0.37932440  0.10532621
[19]  0.08033289 -0.29269083 -0.69193397  0.81677306 -0.25402288 -0.08812258
[25] -0.34117754  0.29026870 -0.54848673 -0.44974248 -0.34110521 -0.17826307
[31] -0.35396760  0.98465366  0.21136827  0.05042017 -0.02400316 -0.42642295
[37] -0.61906692  1.92955621 -0.36050863 -0.68488280  0.10141464  0.64328982
[43]  0.31906603  0.17275054 -0.13570368 -0.13451166 -0.11717037  0.02330814
[49]  0.29918521  0.05938999 -0.20355761 -0.02439309 -1.14548572 -0.94045141
[55]  0.44845239  1.76898773  1.78579981  1.16734413  0.33858833 -0.41153063
[61] -0.31037109 -0.31929663  0.17496536
> 
> # An ARIMA simulation
> ts.sim <- arima.sim(list(order = c(1,1,0), ar = 0.7), n = 200)
> ts.plot(ts.sim)
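> ## A minimal sketch, not run: differencing the ARIMA(1,1,0) path gives
> ## back a stationary AR(1) series that arima() can re-estimate:
> ## arima(diff(ts.sim), order = c(1, 0, 0))   # 'ar1' should be near 0.7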
> 
> 
> 
> cleanEx()
> nameEx("arima0")
> ### * arima0
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: arima0
> ### Title: ARIMA Modelling of Time Series - Preliminary Version
> ### Aliases: arima0 print.arima0 predict.arima0
> ### Keywords: ts
> 
> ### ** Examples
> 
> ## Not run: arima0(lh, order = c(1,0,0))
> arima0(lh, order = c(3,0,0))

Call:
arima0(x = lh, order = c(3, 0, 0))

Coefficients:
         ar1      ar2      ar3  intercept
      0.6448  -0.0634  -0.2198     2.3931
s.e.  0.1394   0.1668   0.1421     0.0963

sigma^2 estimated as 0.1787:  log likelihood = -27.09,  aic = 64.18
> arima0(lh, order = c(1,0,1))

Call:
arima0(x = lh, order = c(1, 0, 1))

Coefficients:
         ar1     ma1  intercept
      0.4521  0.1983     2.4101
s.e.  0.1357  0.1777     0.1357

sigma^2 estimated as 0.1923:  log likelihood = -28.76,  aic = 65.52
> predict(arima0(lh, order = c(3,0,0)), n.ahead = 12)
$pred
Time Series:
Start = 49 
End = 60 
Frequency = 1 
 [1] 2.460173 2.270829 2.198597 2.260696 2.346933 2.414479 2.438918 2.431440
 [9] 2.410223 2.391645 2.382653 2.382697

$se
Time Series:
Start = 49 
End = 60 
Frequency = 1 
 [1] 0.4226823 0.5029332 0.5245256 0.5247161 0.5305499 0.5369159 0.5388045
 [8] 0.5388448 0.5391043 0.5395174 0.5396991 0.5397140

> 
> arima0(lh, order = c(3,0,0), method = "CSS")

Call:
arima0(x = lh, order = c(3, 0, 0), method = "CSS")

Coefficients:
         ar1      ar2      ar3  intercept
      0.6580  -0.0660  -0.2339     2.3999
s.e.  0.1414   0.1702   0.1469     0.0981

sigma^2 estimated as 0.1905:  part log likelihood = -28.31
> 
> # for a model with as few years as this, we want full ML
> (fit <- arima0(USAccDeaths, order = c(0,1,1),
+                seasonal = list(order=c(0,1,1)), delta = -1))

Call:
arima0(x = USAccDeaths, order = c(0, 1, 1), seasonal = list(order = c(0, 1, 
    1)), delta = -1)

Coefficients:
          ma1     sma1
      -0.4304  -0.5526
s.e.   0.1228   0.1785

sigma^2 estimated as 99355:  log likelihood = -425.44,  aic = 856.88
> predict(fit, n.ahead = 6)
$pred
          Jan      Feb      Mar      Apr      May      Jun
1979 8336.028 7531.760 8314.593 8616.864 9488.916 9859.727

$se
          Jan      Feb      Mar      Apr      May      Jun
1979 315.4607 362.9949 404.9878 443.0180 478.0322 510.6511

> 
> arima0(LakeHuron, order = c(2,0,0), xreg = time(LakeHuron)-1920)

Call:
arima0(x = LakeHuron, order = c(2, 0, 0), xreg = time(LakeHuron) - 1920)

Coefficients:
         ar1      ar2  intercept     xreg
      1.0048  -0.2913   579.0985  -0.0216
s.e.  0.0976   0.1004     0.2370   0.0081

sigma^2 estimated as 0.4566:  log likelihood = -101.2,  aic = 212.4
> ## Not run: 
> ##D ## presidents contains NAs
> ##D ## graphs in example(acf) suggest order 1 or 3
> ##D (fit1 <- arima0(presidents, c(1, 0, 0), delta = -1))  # avoid warning
> ##D tsdiag(fit1)
> ##D (fit3 <- arima0(presidents, c(3, 0, 0), delta = -1))  # smaller AIC
> ##D tsdiag(fit3)
> ## End(Not run)
> 
> 
> 
> cleanEx()
> nameEx("as.hclust")
> ### * as.hclust
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: as.hclust
> ### Title: Convert Objects to Class '"hclust"'
> ### Aliases: as.hclust as.hclust.default as.hclust.twins
> ### Keywords: multivariate cluster
> 
> ### ** Examples
> 
> x <- matrix(rnorm(30), ncol = 3)
> hc <- hclust(dist(x), method = "complete")
> 
> 
> 
> 
> cleanEx()
> nameEx("asOneSidedFormula")
> ### * asOneSidedFormula
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: asOneSidedFormula
> ### Title: Convert to One-Sided Formula
> ### Aliases: asOneSidedFormula
> ### Keywords: models
> 
> ### ** Examples
> 
> (form <- asOneSidedFormula("age"))
~age
> stopifnot(exprs = {
+     identical(form, asOneSidedFormula(form))
+     identical(form, asOneSidedFormula(as.name("age")))
+     identical(form, asOneSidedFormula(expression(age)))
+ })
> asOneSidedFormula(quote(log(age)))
~log(age)
> asOneSidedFormula(1)
~1
> 
> 
> 
> cleanEx()
> nameEx("ave")
> ### * ave
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ave
> ### Title: Group Averages Over Level Combinations of Factors
> ### Aliases: ave
> ### Keywords: univar category
> 
> ### ** Examples
> 
> require(graphics)
> 
> ave(1:3)  # no grouping -> grand mean
[1] 2 2 2
> 
> attach(warpbreaks)
> ave(breaks, wool)
 [1] 31.03704 31.03704 31.03704 31.03704 31.03704 31.03704 31.03704 31.03704
 [9] 31.03704 31.03704 31.03704 31.03704 31.03704 31.03704 31.03704 31.03704
[17] 31.03704 31.03704 31.03704 31.03704 31.03704 31.03704 31.03704 31.03704
[25] 31.03704 31.03704 31.03704 25.25926 25.25926 25.25926 25.25926 25.25926
[33] 25.25926 25.25926 25.25926 25.25926 25.25926 25.25926 25.25926 25.25926
[41] 25.25926 25.25926 25.25926 25.25926 25.25926 25.25926 25.25926 25.25926
[49] 25.25926 25.25926 25.25926 25.25926 25.25926 25.25926
> ave(breaks, tension)
 [1] 36.38889 36.38889 36.38889 36.38889 36.38889 36.38889 36.38889 36.38889
 [9] 36.38889 26.38889 26.38889 26.38889 26.38889 26.38889 26.38889 26.38889
[17] 26.38889 26.38889 21.66667 21.66667 21.66667 21.66667 21.66667 21.66667
[25] 21.66667 21.66667 21.66667 36.38889 36.38889 36.38889 36.38889 36.38889
[33] 36.38889 36.38889 36.38889 36.38889 26.38889 26.38889 26.38889 26.38889
[41] 26.38889 26.38889 26.38889 26.38889 26.38889 21.66667 21.66667 21.66667
[49] 21.66667 21.66667 21.66667 21.66667 21.66667 21.66667
> ave(breaks, tension, FUN = function(x) mean(x, trim = 0.1))
 [1] 35.6875 35.6875 35.6875 35.6875 35.6875 35.6875 35.6875 35.6875 35.6875
[10] 26.3125 26.3125 26.3125 26.3125 26.3125 26.3125 26.3125 26.3125 26.3125
[19] 21.0625 21.0625 21.0625 21.0625 21.0625 21.0625 21.0625 21.0625 21.0625
[28] 35.6875 35.6875 35.6875 35.6875 35.6875 35.6875 35.6875 35.6875 35.6875
[37] 26.3125 26.3125 26.3125 26.3125 26.3125 26.3125 26.3125 26.3125 26.3125
[46] 21.0625 21.0625 21.0625 21.0625 21.0625 21.0625 21.0625 21.0625 21.0625
> plot(breaks, main =
+      "ave( Warpbreaks )  for   wool  x  tension  combinations")
> lines(ave(breaks, wool, tension              ), type = "s", col = "blue")
> lines(ave(breaks, wool, tension, FUN = median), type = "s", col = "green")
> legend(40, 70, c("mean", "median"), lty = 1,
+       col = c("blue","green"), bg = "gray90")
> detach()
> 
> # Running index per group
> (g <- sample(c("u","s","e","R"), 24, replace = TRUE))
 [1] "u" "R" "e" "u" "s" "u" "e" "e" "s" "s" "e" "e" "u" "u" "u" "s" "s" "s" "s"
[20] "e" "u" "e" "u" "u"
> ave(seq_along(g), g, FUN = seq_along)
 [1] 1 1 1 2 1 3 2 3 2 3 4 5 4 5 6 4 5 6 7 6 7 7 8 9
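> ## A minimal sketch, not run: the same idiom gives group-wise centering;
> ## the 'y' below is made-up illustration data:
> ## y <- rnorm(24)
> ## y.centered <- y - ave(y, g)   # subtract each group's mean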
> 
> 
> 
> cleanEx()
> nameEx("bandwidth")
> ### * bandwidth
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: bandwidth
> ### Title: Bandwidth Selectors for Kernel Density Estimation
> ### Aliases: bw.nrd0 bw.nrd bw.ucv bw.bcv bw.SJ
> ### Keywords: distribution smooth
> 
> ### ** Examples
> 
> require(graphics)
> 
> plot(density(precip, n = 1000))
> rug(precip)
> lines(density(precip, bw = "nrd"), col = 2)
> lines(density(precip, bw = "ucv"), col = 3)
> lines(density(precip, bw = "bcv"), col = 4)
Warning in bw.bcv(x) : minimum occurred at one end of the range
> lines(density(precip, bw = "SJ-ste"), col = 5)
> lines(density(precip, bw = "SJ-dpi"), col = 6)
> legend(55, 0.035,
+        legend = c("nrd0", "nrd", "ucv", "bcv", "SJ-ste", "SJ-dpi"),
+        col = 1:6, lty = 1)
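> ## A minimal sketch, not run: the selectors can also be called directly
> ## to compare the numeric bandwidths they choose for 'precip'
> ## (bw.bcv() may warn, as above):
> ## sapply(list(nrd0 = bw.nrd0, nrd = bw.nrd, ucv = bw.ucv,
> ##             bcv = bw.bcv, SJ = bw.SJ), function(f) f(precip))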
> 
> 
> 
> cleanEx()
> nameEx("bartlett.test")
> ### * bartlett.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: bartlett.test
> ### Title: Bartlett Test of Homogeneity of Variances
> ### Aliases: bartlett.test bartlett.test.default bartlett.test.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> require(graphics)
> 
> plot(count ~ spray, data = InsectSprays)
> bartlett.test(InsectSprays$count, InsectSprays$spray)

	Bartlett test of homogeneity of variances

data:  InsectSprays$count and InsectSprays$spray
Bartlett's K-squared = 25.96, df = 5, p-value = 9.085e-05

> bartlett.test(count ~ spray, data = InsectSprays)

	Bartlett test of homogeneity of variances

data:  count by spray
Bartlett's K-squared = 25.96, df = 5, p-value = 9.085e-05

> 
> 
> 
> cleanEx()
> nameEx("binom.test")
> ### * binom.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: binom.test
> ### Title: Exact Binomial Test
> ### Aliases: binom.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Conover (1971), p. 97f.
> ## Under (the assumption of) simple Mendelian inheritance, a cross
> ##  between plants of two particular genotypes produces progeny 1/4 of
> ##  which are "dwarf" and 3/4 of which are "giant", respectively.
> ##  In an experiment to determine if this assumption is reasonable, a
> ##  cross results in progeny having 243 dwarf and 682 giant plants.
> ##  If "giant" is taken as success, the null hypothesis is that p =
> ##  3/4 and the alternative that p != 3/4.
> binom.test(c(682, 243), p = 3/4)

	Exact binomial test

data:  c(682, 243)
number of successes = 682, number of trials = 925, p-value = 0.3825
alternative hypothesis: true probability of success is not equal to 0.75
95 percent confidence interval:
 0.7076683 0.7654066
sample estimates:
probability of success 
             0.7372973 

> binom.test(682, 682 + 243, p = 3/4)   # The same.

	Exact binomial test

data:  682 and 682 + 243
number of successes = 682, number of trials = 925, p-value = 0.3825
alternative hypothesis: true probability of success is not equal to 0.75
95 percent confidence interval:
 0.7076683 0.7654066
sample estimates:
probability of success 
             0.7372973 

> ## => Data are in agreement with the null hypothesis.
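> ## A minimal sketch, not run: the returned "htest" object is a list, so
> ## its pieces can be extracted programmatically; 'bt' is just a name:
> ## bt <- binom.test(682, 925, p = 3/4)
> ## c(p = bt$p.value, bt$conf.int, bt$estimate)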
> 
> 
> 
> cleanEx()
> nameEx("biplot.princomp")
> ### * biplot.princomp
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: biplot.princomp
> ### Title: Biplot for Principal Components
> ### Aliases: biplot.princomp biplot.prcomp
> ### Keywords: multivariate hplot
> 
> ### ** Examples
> 
> require(graphics)
> biplot(princomp(USArrests))
> 
> 
> 
> cleanEx()
> nameEx("birthday")
> ### * birthday
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: birthday
> ### Title: Probability of coincidences
> ### Aliases: qbirthday pbirthday
> ### Keywords: distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## the standard version
> qbirthday() # 23
[1] 23
> ## probability of > 2 people with the same birthday
> pbirthday(23, coincident = 3)
[1] 0.01441541
> 
> ## examples from Diaconis & Mosteller p. 858.
> ## 'coincidence' is that husband, wife, daughter all born on the 16th
> qbirthday(classes = 30, coincident = 3) # approximately 18
[1] 18
> qbirthday(coincident = 4)  # exact value 187
[1] 187
> qbirthday(coincident = 10) # exact value 1181
[1] 1179
> 
> ## same 4-digit PIN
> qbirthday(classes = 10^4)
[1] 119
> 
> ## 0.9 probability of three or more coincident birthdays
> qbirthday(coincident = 3, prob = 0.9)
[1] 135
> 
> ## Chance of 4 or more coincident birthdays in 150 people
> pbirthday(150, coincident = 4)
[1] 0.2690146
> 
> ## 100 or more coincident birthdays in 1000 people: very rare
> pbirthday(1000, coincident = 100)
[1] 1.531434e-113
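> ## A minimal sketch, not run: qbirthday() is the smallest n for which
> ## pbirthday(n) reaches 'prob', so the two should be consistent:
> ## pbirthday(qbirthday())      # >= 0.5
> ## pbirthday(qbirthday() - 1)  # <  0.5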
> 
> 
> 
> cleanEx()
> nameEx("box.test")
> ### * box.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: Box.test
> ### Title: Box-Pierce and Ljung-Box Tests
> ### Aliases: Box.test
> ### Keywords: ts
> 
> ### ** Examples
> 
> x <- rnorm (100)
> Box.test (x, lag = 1)

	Box-Pierce test

data:  x
X-squared = 0.0013332, df = 1, p-value = 0.9709

> Box.test (x, lag = 1, type = "Ljung")

	Box-Ljung test

data:  x
X-squared = 0.0013736, df = 1, p-value = 0.9704

> 
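> ## A minimal sketch, not run: a typical use is a portmanteau check on
> ## ARMA residuals, where 'fitdf' discounts the estimated parameters;
> ## the 'fit' below is a fresh illustration object:
> ## fit <- arima(lh, order = c(1, 0, 0))
> ## Box.test(residuals(fit), lag = 10, type = "Ljung", fitdf = 1)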
> 
> 
> cleanEx()
> nameEx("cancor")
> ### * cancor
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: cancor
> ### Title: Canonical Correlations
> ### Aliases: cancor
> ### Keywords: multivariate
> 
> ### ** Examples
> 
> 
> cleanEx()
> nameEx("case.names")
> ### * case.names
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: case+variable.names
> ### Title: Case and Variable Names of Fitted Models
> ### Aliases: case.names case.names.lm variable.names variable.names.lm
> ### Keywords: regression models
> 
> ### ** Examples
> 
> x <- 1:20
> y <-  setNames(x + (x/4 - 2)^3 + rnorm(20, sd = 3),
+                paste("O", x, sep = "."))
> ww <- rep(1, 20); ww[13] <- 0
> summary(lmxy <- lm(y ~ x + I(x^2)+I(x^3) + I((x-10)^2), weights = ww),
+         correlation = TRUE)

Call:
lm(formula = y ~ x + I(x^2) + I(x^3) + I((x - 10)^2), weights = ww)

Weighted Residuals:
    Min      1Q  Median      3Q     Max 
-6.7160 -0.7047  0.0728  1.0174  3.9947 

Coefficients: (1 not defined because of singularities)
                Estimate Std. Error t value Pr(>|t|)    
(Intercept)   -10.967768   3.104498  -3.533 0.003013 ** 
x               5.524412   1.255354   4.401 0.000516 ***
I(x^2)         -0.542292   0.138460  -3.917 0.001374 ** 
I(x^3)          0.020905   0.004372   4.782 0.000242 ***
I((x - 10)^2)         NA         NA      NA       NA    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 2.838 on 15 degrees of freedom
Multiple R-squared:  0.9702,	Adjusted R-squared:  0.9643 
F-statistic:   163 on 3 and 15 DF,  p-value: 1.14e-11

Correlation of Coefficients:
       (Intercept) x     I(x^2)
x      -0.90                   
I(x^2)  0.80       -0.97       
I(x^3) -0.73        0.93 -0.99 

> variable.names(lmxy)
[1] "(Intercept)" "x"           "I(x^2)"      "I(x^3)"     
> variable.names(lmxy, full = TRUE)  # includes the last (aliased) term
[1] "(Intercept)"   "x"             "I(x^2)"        "I(x^3)"       
[5] "I((x - 10)^2)"
> case.names(lmxy)
 [1] "O.1"  "O.2"  "O.3"  "O.4"  "O.5"  "O.6"  "O.7"  "O.8"  "O.9"  "O.10"
[11] "O.11" "O.12" "O.14" "O.15" "O.16" "O.17" "O.18" "O.19" "O.20"
> case.names(lmxy, full = TRUE)      # includes the 0-weight case
 [1] "O.1"  "O.2"  "O.3"  "O.4"  "O.5"  "O.6"  "O.7"  "O.8"  "O.9"  "O.10"
[11] "O.11" "O.12" "O.13" "O.14" "O.15" "O.16" "O.17" "O.18" "O.19" "O.20"
> 
> 
> 
> cleanEx()
> nameEx("checkMFClasses")
> ### * checkMFClasses
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: .checkMFClasses
> ### Title: Functions to Check the Type of Variables passed to Model Frames
> ### Aliases: .checkMFClasses .MFclass .getXlevels
> ### Keywords: utilities
> 
> ### ** Examples
> 
> sapply(warpbreaks, .MFclass) # "numeric" plus 2 x "factor"
   breaks      wool   tension 
"numeric"  "factor"  "factor" 
> sapply(iris,       .MFclass) # 4 x "numeric" plus "factor"
Sepal.Length  Sepal.Width Petal.Length  Petal.Width      Species 
   "numeric"    "numeric"    "numeric"    "numeric"     "factor" 
> 
> mf <- model.frame(Sepal.Width ~ Species,      iris)
> mc <- model.frame(Sepal.Width ~ Sepal.Length, iris)
> 
> .checkMFClasses("numeric", mc) # nothing else
> .checkMFClasses(c("numeric", "factor"), mf)
> 
> ## simple .getXlevels() cases :
> (xl <- .getXlevels(terms(mf), mf)) # a list with one entry "$Species" with 3 levels:
$Species
[1] "setosa"     "versicolor" "virginica" 

> stopifnot(exprs = {
+   identical(xl$Species, levels(iris$Species))
+   identical(.getXlevels(terms(mc), mc), xl[0]) # an empty named list, as there are no factors
+   is.null(.getXlevels(terms(x~x), list(x=1)))
+ })
> 
> 
> 
> cleanEx()
> nameEx("chisq.test")
> ### * chisq.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: chisq.test
> ### Title: Pearson's Chi-squared Test for Count Data
> ### Aliases: chisq.test
> ### Keywords: htest distribution
> 
> ### ** Examples
> 
> 
> ## From Agresti(2007) p.39
> M <- as.table(rbind(c(762, 327, 468), c(484, 239, 477)))
> dimnames(M) <- list(gender = c("F", "M"),
+                     party = c("Democrat","Independent", "Republican"))
> (Xsq <- chisq.test(M))  # Prints test summary

	Pearson's Chi-squared test

data:  M
X-squared = 30.07, df = 2, p-value = 2.954e-07

> Xsq$observed   # observed counts (same as M)
      party
gender Democrat Independent Republican
     F      762         327        468
     M      484         239        477
> Xsq$expected   # expected counts under the null
      party
gender Democrat Independent Republican
     F 703.6714    319.6453   533.6834
     M 542.3286    246.3547   411.3166
> Xsq$residuals  # Pearson residuals
      party
gender   Democrat Independent Republican
     F  2.1988558   0.4113702 -2.8432397
     M -2.5046695  -0.4685829  3.2386734
> Xsq$stdres     # standardized residuals
      party
gender   Democrat Independent Republican
     F  4.5020535   0.6994517 -5.3159455
     M -4.5020535  -0.6994517  5.3159455
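> ## A minimal sketch, not run: the test statistic is the sum of the
> ## squared Pearson residuals shown above:
> ## all.equal(sum(Xsq$residuals^2), unname(Xsq$statistic))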
> 
> 
> ## Effect of simulating p-values
> x <- matrix(c(12, 5, 7, 7), ncol = 2)
> chisq.test(x)$p.value           # 0.4233
[1] 0.4233054
> chisq.test(x, simulate.p.value = TRUE, B = 10000)$p.value
[1] 0.2935706
>                                 # around 0.29!
> 
> ## Testing for population probabilities
> ## Case A. Tabulated data
> x <- c(A = 20, B = 15, C = 25)
> chisq.test(x)

	Chi-squared test for given probabilities

data:  x
X-squared = 2.5, df = 2, p-value = 0.2865

> chisq.test(as.table(x))             # the same

	Chi-squared test for given probabilities

data:  as.table(x)
X-squared = 2.5, df = 2, p-value = 0.2865

> x <- c(89,37,30,28,2)
> p <- c(40,20,20,15,5)
> try(
+ chisq.test(x, p = p)                # gives an error
+ )
Error in chisq.test(x, p = p) : probabilities must sum to 1.
> chisq.test(x, p = p, rescale.p = TRUE)

	Chi-squared test for given probabilities

data:  x
X-squared = 9.9901, df = 4, p-value = 0.04059

>                                 # works
> p <- c(0.40,0.20,0.20,0.19,0.01)
>                                 # Expected count in category 5
>                                 # is 1.86 < 5, so the chi-squared
> chisq.test(x, p = p)            # approximation may be doubtful, but is OK
Warning in chisq.test(x, p = p) :
  Chi-squared approximation may be incorrect

	Chi-squared test for given probabilities

data:  x
X-squared = 5.7947, df = 4, p-value = 0.215

> chisq.test(x, p = p, simulate.p.value = TRUE)

	Chi-squared test for given probabilities with simulated p-value (based
	on 2000 replicates)

data:  x
X-squared = 5.7947, df = NA, p-value = 0.2029

> 
> ## Case B. Raw data
> x <- trunc(5 * runif(100))
> chisq.test(table(x))            # NOT 'chisq.test(x)'!

	Chi-squared test for given probabilities

data:  table(x)
X-squared = 4.3, df = 4, p-value = 0.3669

> 
> 
> 
> cleanEx()
> nameEx("cmdscale")
> ### * cmdscale
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: cmdscale
> ### Title: Classical (Metric) Multidimensional Scaling
> ### Aliases: cmdscale
> ### Keywords: multivariate
> 
> ### ** Examples
> 
> require(graphics)
> 
> loc <- cmdscale(eurodist)
> x <- loc[, 1]
> y <- -loc[, 2] # reflect so North is at the top
> ## note asp = 1, to ensure Euclidean distances are represented correctly
> plot(x, y, type = "n", xlab = "", ylab = "", asp = 1, axes = FALSE,
+      main = "cmdscale(eurodist)")
> text(x, y, rownames(loc), cex = 0.6)
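> ## A minimal sketch, not run: with eig = TRUE, cmdscale() also returns
> ## the eigenvalues and a goodness-of-fit measure for the k = 2 solution:
> ## cmdscale(eurodist, k = 2, eig = TRUE)$GOF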
> 
> 
> 
> cleanEx()
> nameEx("coef")
> ### * coef
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: coef
> ### Title: Extract Model Coefficients
> ### Aliases: coef coefficients coef.default coef.aov
> ### Keywords: regression models
> 
> ### ** Examples
> 
> x <- 1:5; coef(lm(c(1:3, 7, 6) ~ x))
(Intercept)           x 
       -0.7         1.5 
> 
> 
> 
> cleanEx()
> nameEx("complete.cases")
> ### * complete.cases
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: complete.cases
> ### Title: Find Complete Cases
> ### Aliases: complete.cases
> ### Keywords: NA logic
> 
> ### ** Examples
> 
> x <- airquality[, -1] # x is a regression design matrix
> y <- airquality[,  1] # y is the corresponding response
> 
> stopifnot(complete.cases(y) != is.na(y))
> ok <- complete.cases(x, y)
> sum(!ok) # how many are not "ok" ?
[1] 42
> x <- x[ok,]
> y <- y[ok]
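> ## A minimal sketch, not run: the same filtering in one step on the
> ## whole data frame:
> ## nrow(na.omit(airquality)) == sum(ok)   # both keep the 111 complete rows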
> 
> 
> 
> cleanEx()
> nameEx("confint")
> ### * confint
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: confint
> ### Title: Confidence Intervals for Model Parameters
> ### Aliases: confint confint.default confint.lm confint.glm confint.nls
> ### Keywords: models
> 
> ### ** Examples
> 
> fit <- lm(100/mpg ~ disp + hp + wt + am, data = mtcars)
> confint(fit)
                   2.5 %      97.5 %
(Intercept) -0.774822875 2.256118188
disp        -0.002867999 0.008273849
hp          -0.001400580 0.011949674
wt           0.380088737 1.622517536
am          -0.614677730 0.926307310
> confint(fit, "wt")
       2.5 %   97.5 %
wt 0.3800887 1.622518
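> ## A minimal sketch, not run: for lm fits these are t-based intervals;
> ## the "wt" row reconstructed by hand ('est'/'se' are local names):
> ## est <- coef(fit)["wt"]; se <- sqrt(diag(vcov(fit)))["wt"]
> ## est + qt(c(0.025, 0.975), df.residual(fit)) * se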
> 
> ## from example(glm)
> counts <- c(18,17,15,20,10,20,25,13,12)
> outcome <- gl(3, 1, 9); treatment <- gl(3, 3)
> glm.D93 <- glm(counts ~ outcome + treatment, family = poisson())
> confint(glm.D93) 
Waiting for profiling to be done...
                 2.5 %      97.5 %
(Intercept)  2.6958215  3.36655581
outcome2    -0.8577018 -0.06255840
outcome3    -0.6753696  0.08244089
treatment2  -0.3932548  0.39325483
treatment3  -0.3932548  0.39325483
> confint.default(glm.D93)  # based on asymptotic normality
                 2.5 %      97.5 %
(Intercept)  2.7095672  3.37947764
outcome2    -0.8505027 -0.05800787
outcome3    -0.6707552  0.08478093
treatment2  -0.3919928  0.39199279
treatment3  -0.3919928  0.39199279
> 
> 
> cleanEx()
> nameEx("constrOptim")
> ### * constrOptim
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: constrOptim
> ### Title: Linearly Constrained Optimization
> ### Aliases: constrOptim
> ### Keywords: optimize
> 
> ### ** Examples
> 
> 
> cleanEx()
> nameEx("contrast")
> ### * contrast
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: contrast
> ### Title: (Possibly Sparse) Contrast Matrices
> ### Aliases: contr.helmert contr.poly contr.sum contr.treatment contr.SAS
> ### Keywords: design regression array
> 
> ### ** Examples
> 
> (cH <- contr.helmert(4))
  [,1] [,2] [,3]
1   -1   -1   -1
2    1   -1   -1
3    0    2   -1
4    0    0    3
> apply(cH, 2, sum) # column sums are 0
[1] 0 0 0
> crossprod(cH) # diagonal -- columns are orthogonal
     [,1] [,2] [,3]
[1,]    2    0    0
[2,]    0    6    0
[3,]    0    0   12
> contr.helmert(4, contrasts = FALSE) # just the 4 x 4 identity matrix
  1 2 3 4
1 1 0 0 0
2 0 1 0 0
3 0 0 1 0
4 0 0 0 1
> 
> (cT <- contr.treatment(5))
  2 3 4 5
1 0 0 0 0
2 1 0 0 0
3 0 1 0 0
4 0 0 1 0
5 0 0 0 1
> all(crossprod(cT) == diag(4)) # TRUE: even orthonormal
[1] TRUE
> 
> (cT. <- contr.SAS(5))
  1 2 3 4
1 1 0 0 0
2 0 1 0 0
3 0 0 1 0
4 0 0 0 1
5 0 0 0 0
> all(crossprod(cT.) == diag(4)) # TRUE
[1] TRUE
> 
> zapsmall(cP <- contr.poly(3)) # Linear and Quadratic
             .L         .Q
[1,] -0.7071068  0.4082483
[2,]  0.0000000 -0.8164966
[3,]  0.7071068  0.4082483
> zapsmall(crossprod(cP), digits = 15) # orthonormal up to fuzz
   .L .Q
.L  1  0
.Q  0  1
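> ## A minimal sketch, not run: which of these generators factor() uses by
> ## default is governed by a global option:
> ## getOption("contrasts")  # usually "contr.treatment" / "contr.poly"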
> 
> 
> 
> cleanEx()
> nameEx("contrasts")
> ### * contrasts
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: contrasts
> ### Title: Get and Set Contrast Matrices
> ### Aliases: contrasts contrasts<-
> ### Keywords: design regression
> 
> ### ** Examples
> 
> utils::example(factor)

factor> (ff <- factor(substring("statistics", 1:10, 1:10), levels = letters))
 [1] s t a t i s t i c s
Levels: a b c d e f g h i j k l m n o p q r s t u v w x y z

factor> as.integer(ff)      # the internal codes
 [1] 19 20  1 20  9 19 20  9  3 19

factor> (f. <- factor(ff))  # drops the levels that do not occur
 [1] s t a t i s t i c s
Levels: a c i s t

factor> ff[, drop = TRUE]   # the same, more transparently
 [1] s t a t i s t i c s
Levels: a c i s t

factor> factor(letters[1:20], labels = "letter")
 [1] letter1  letter2  letter3  letter4  letter5  letter6  letter7  letter8 
 [9] letter9  letter10 letter11 letter12 letter13 letter14 letter15 letter16
[17] letter17 letter18 letter19 letter20
20 Levels: letter1 letter2 letter3 letter4 letter5 letter6 letter7 ... letter20

factor> class(ordered(4:1)) # "ordered", inheriting from "factor"
[1] "ordered" "factor" 

factor> z <- factor(LETTERS[3:1], ordered = TRUE)

factor> ## and "relational" methods work:
factor> stopifnot(sort(z)[c(1,3)] == range(z), min(z) < max(z))

factor> ## Don't show: 
factor> of <- ordered(ff)

factor> stopifnot(identical(range(of, rev(of)), of[3:2]),
factor+ 	  identical(max(of), of[2]))

factor> ## End(Don't show)
factor> 
factor> ## suppose you want "NA" as a level, and to allow missing values.
factor> (x <- factor(c(1, 2, NA), exclude = NULL))
[1] 1    2    <NA>
Levels: 1 2 <NA>

factor> is.na(x)[2] <- TRUE

factor> x  # [1] 1    <NA> <NA>
[1] 1    <NA> <NA>
Levels: 1 2 <NA>

factor> is.na(x)
[1] FALSE  TRUE FALSE

factor> # [1] FALSE  TRUE FALSE
factor> 
factor> ## More rational, since R 3.4.0 :
factor> factor(c(1:2, NA), exclude =  "" ) # keeps <NA> , as
[1] 1    2    <NA>
Levels: 1 2 <NA>

factor> factor(c(1:2, NA), exclude = NULL) # always did
[1] 1    2    <NA>
Levels: 1 2 <NA>

factor> ## exclude = <character>
factor> z # ordered levels 'A < B < C'
[1] C B A
Levels: A < B < C

factor> factor(z, exclude = "C") # does exclude
[1] <NA> B    A   
Levels: A < B

factor> factor(z, exclude = "B") # ditto
[1] C    <NA> A   
Levels: A < C

factor> ## Now, labels may be duplicated:
factor> ## factor() with duplicated labels allowing to "merge levels"
factor> x <- c("Man", "Male", "Man", "Lady", "Female")

factor> ## Map from 4 different values to only two levels:
factor> (xf <- factor(x, levels = c("Male", "Man" , "Lady",   "Female"),
factor+                  labels = c("Male", "Male", "Female", "Female")))
[1] Male   Male   Male   Female Female
Levels: Male Female

factor> #> [1] Male   Male   Male   Female Female
factor> #> Levels: Male Female
factor> 
factor> ## Using addNA()
factor> Month <- airquality$Month

factor> table(addNA(Month))

   5    6    7    8    9 <NA> 
  31   30   31   31   30    0 

factor> table(addNA(Month, ifany = TRUE))

 5  6  7  8  9 
31 30 31 31 30 
> fff <- ff[, drop = TRUE]  # reduce to 5 levels.
> contrasts(fff) # treatment contrasts by default
  c i s t
a 0 0 0 0
c 1 0 0 0
i 0 1 0 0
s 0 0 1 0
t 0 0 0 1
> contrasts(C(fff, sum))
  [,1] [,2] [,3] [,4]
a    1    0    0    0
c    0    1    0    0
i    0    0    1    0
s    0    0    0    1
t   -1   -1   -1   -1
> contrasts(fff, contrasts = FALSE) # the 5x5 identity matrix
  a c i s t
a 1 0 0 0 0
c 0 1 0 0 0
i 0 0 1 0 0
s 0 0 0 1 0
t 0 0 0 0 1
> 
> contrasts(fff) <- contr.sum(5); contrasts(fff)  # set sum contrasts
  [,1] [,2] [,3] [,4]
a    1    0    0    0
c    0    1    0    0
i    0    0    1    0
s    0    0    0    1
t   -1   -1   -1   -1
> contrasts(fff, 2) <- contr.sum(5); contrasts(fff)  # set 2 contrasts
  [,1] [,2]
a    1    0
c    0    1
i    0    0
s    0    0
t   -1   -1
> # supply 2 contrasts, compute 2 more to make full set of 4.
> contrasts(fff) <- contr.sum(5)[, 1:2]; contrasts(fff)
  [,1] [,2]       [,3]       [,4]
a    1    0 -0.2471257  0.2688164
c    0    1 -0.2471257  0.2688164
i    0    0 -0.1498721 -0.8817814
s    0    0  0.8912491  0.0753323
t   -1   -1 -0.2471257  0.2688164
> 
> 
> 
> cleanEx()
> nameEx("convolve")
> ### * convolve
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: convolve
> ### Title: Convolution of Sequences via FFT
> ### Aliases: convolve
> ### Keywords: math dplot
> 
> ### ** Examples
> 
> require(graphics)
> 
> x <- c(0,0,0,100,0,0,0)
> y <- c(0,0,1, 2 ,1,0,0)/4
> zapsmall(convolve(x, y))         #  *NOT* what you first thought.
[1] 50 25  0  0  0  0 25
> zapsmall(convolve(x, y[3:5], type = "f")) # rather
[1]  0 25 50 25  0
> x <- rnorm(50)
> y <- rnorm(50)
> # Circular convolution *has* this symmetry:
> all.equal(convolve(x, y, conj = FALSE), rev(convolve(rev(y),x)))
[1] TRUE
> 
> n <- length(x <- -20:24)
> y <- (x-10)^2/1000 + rnorm(x)/8
> 
> Han <- function(y) # Hanning
+        convolve(y, c(1,2,1)/4, type = "filter")
> 
> plot(x, y, main = "Using  convolve(.) for Hanning filters")
> lines(x[-c(1  , n)      ], Han(y), col = "red")
> lines(x[-c(1:2, (n-1):n)], Han(Han(y)), lwd = 2, col = "dark blue")
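> ## A minimal sketch, not run: the same Hanning smoother via filter();
> ## the two should agree after dropping filter()'s boundary values:
> ## all.equal(c(Han(y)), c(stats::filter(y, c(1,2,1)/4))[2:(n-1)])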
> 
> 
> 
> cleanEx()
> nameEx("cophenetic")
> ### * cophenetic
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: cophenetic
> ### Title: Cophenetic Distances for a Hierarchical Clustering
> ### Aliases: cophenetic cophenetic.default cophenetic.dendrogram
> ### Keywords: cluster multivariate
> 
> ### ** Examples
> 
> require(graphics)
> 
> d1 <- dist(USArrests)
> hc <- hclust(d1, "ave")
> d2 <- cophenetic(hc)
> cor(d1, d2) # 0.7659
[1] 0.7658983
> 
> ## Example from Sneath & Sokal, Fig. 5-29, p.279
> d0 <- c(1,3.8,4.4,5.1, 4,4.2,5, 2.6,5.3, 5.4)
> attributes(d0) <- list(Size = 5, diag = TRUE)
> class(d0) <- "dist"
> names(d0) <- letters[1:5]
> d0
    1   2   3   4
2 1.0            
3 3.8 4.0        
4 4.4 4.2 2.6    
5 5.1 5.0 5.3 5.4
> utils::str(upgma <- hclust(d0, method = "average"))
List of 7
 $ merge      : int [1:4, 1:2] -1 -3 1 -5 -2 -4 2 3
 $ height     : num [1:4] 1 2.6 4.1 5.2
 $ order      : int [1:5] 5 1 2 3 4
 $ labels     : NULL
 $ method     : chr "average"
 $ call       : language hclust(d = d0, method = "average")
 $ dist.method: NULL
 - attr(*, "class")= chr "hclust"
> plot(upgma, hang = -1)
> #
> (d.coph <- cophenetic(upgma))
    1   2   3   4
2 1.0            
3 4.1 4.1        
4 4.1 4.1 2.6    
5 5.2 5.2 5.2 5.2
> cor(d0, d.coph) # 0.9911
[1] 0.9911351
> 
> 
> 
> cleanEx()
> nameEx("cor")
> ### * cor
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: cor
> ### Title: Correlation, Variance and Covariance (Matrices)
> ### Aliases: var cov cor cov2cor
> ### Keywords: univar multivariate array
> 
> ### ** Examples
> 
> var(1:10)  # 9.166667
[1] 9.166667
> 
> var(1:5, 1:5) # 2.5
[1] 2.5
> 
> ## Two simple vectors
> cor(1:10, 2:11) # == 1
[1] 1
> 
> ## Correlation Matrix of Multivariate sample:
> (Cl <- cor(longley))
             GNP.deflator       GNP Unemployed Armed.Forces Population
GNP.deflator    1.0000000 0.9915892  0.6206334    0.4647442  0.9791634
GNP             0.9915892 1.0000000  0.6042609    0.4464368  0.9910901
Unemployed      0.6206334 0.6042609  1.0000000   -0.1774206  0.6865515
Armed.Forces    0.4647442 0.4464368 -0.1774206    1.0000000  0.3644163
Population      0.9791634 0.9910901  0.6865515    0.3644163  1.0000000
Year            0.9911492 0.9952735  0.6682566    0.4172451  0.9939528
Employed        0.9708985 0.9835516  0.5024981    0.4573074  0.9603906
                  Year  Employed
GNP.deflator 0.9911492 0.9708985
GNP          0.9952735 0.9835516
Unemployed   0.6682566 0.5024981
Armed.Forces 0.4172451 0.4573074
Population   0.9939528 0.9603906
Year         1.0000000 0.9713295
Employed     0.9713295 1.0000000
> ## Graphical Correlation Matrix:
> symnum(Cl) # highly correlated
             GNP. GNP U A P Y E
GNP.deflator 1                 
GNP          B    1            
Unemployed   ,    ,   1        
Armed.Forces .    .     1      
Population   B    B   , . 1    
Year         B    B   , . B 1  
Employed     B    B   . . B B 1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> 
> ## Spearman's rho  and  Kendall's tau
> symnum(clS <- cor(longley, method = "spearman"))
             GNP. GNP U A P Y E
GNP.deflator 1                 
GNP          B    1            
Unemployed   ,    ,   1        
Armed.Forces          . 1      
Population   B    B   ,   1    
Year         B    B   ,   1 1  
Employed     B    B   .   B B 1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> symnum(clK <- cor(longley, method = "kendall"))
             GNP. GNP U A P Y E
GNP.deflator 1                 
GNP          B    1            
Unemployed   .    .   1        
Armed.Forces            1      
Population   B    B   .   1    
Year         B    B   .   1 1  
Employed     *    *   .   + + 1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> ## How much do they differ?
> i <- lower.tri(Cl)
> cor(cbind(P = Cl[i], S = clS[i], K = clK[i]))
          P         S         K
P 1.0000000 0.9802390 0.9572562
S 0.9802390 1.0000000 0.9742171
K 0.9572562 0.9742171 1.0000000
> 
> 
> ## cov2cor() scales a covariance matrix by its diagonal
> ##           to become the correlation matrix.
> cov2cor # see the function definition (and learn from it)
function (V) 
{
    p <- (d <- dim(V))[1L]
    if (!is.numeric(V) || length(d) != 2L || p != d[2L]) 
        stop("'V' is not a square numeric matrix")
    pos <- !is.na(Is <- D <- diag(V, names = FALSE)) & D > 0
    Is[pos] <- sqrt(1/D[pos])
    Is[!pos] <- NaN
    if (any(!pos) || any(!is.finite(Is))) 
        warning("diag(V) had non-positive or NA entries; the non-finite result may be dubious")
    r <- V
    r[] <- Is * V * rep(Is, each = p)
    if (p) 
        r[seq.int(from = 1L, by = p + 1L, length.out = p)] <- 1
    r
}
<bytecode: 0x12090f330>
<environment: namespace:stats>
> stopifnot(all.equal(Cl, cov2cor(cov(longley))),
+           all.equal(cor(longley, method = "kendall"),
+             cov2cor(cov(longley, method = "kendall"))))
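> ## A minimal sketch, not run: the scaling cov2cor() performs, written
> ## out directly with the standard deviations ('V'/'D' are local names):
> ## V <- cov(longley); D <- sqrt(diag(V))
> ## all.equal(cov2cor(V), V / outer(D, D))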
> 
> ##--- Missing value treatment:
> C1 <- cov(swiss)
> range(eigen(C1, only.values = TRUE)$values) # 6.19        1921
[1]    6.191249 1921.562488
> 
> ## swM := "swiss" with 3 missing values :
> swM <- swiss
> colnames(swM) <- abbreviate(colnames(swiss), minlength=6)
> swM[1,2] <- swM[7,3] <- swM[25,5] <- NA # create 3 missing values
> 
> ## Consider all 5 "use" cases :
> (C. <- cov(swM)) # use="everything"  quite a few NA's in cov.matrix
          Frtlty Agrclt Exmntn     Eductn Cathlc    Infn.M
Frtlty 156.04250     NA     NA -79.729510     NA 15.156193
Agrclt        NA     NA     NA         NA     NA        NA
Exmntn        NA     NA     NA         NA     NA        NA
Eductn -79.72951     NA     NA  92.456059     NA -2.781684
Cathlc        NA     NA     NA         NA     NA        NA
Infn.M  15.15619     NA     NA  -2.781684     NA  8.483802
> try(cov(swM, use = "all")) # Error: missing obs...
Error in cov(swM, use = "all") : missing observations in cov/cor
> C2 <- cov(swM, use = "complete")
> stopifnot(identical(C2, cov(swM, use = "na.or.complete")))
> range(eigen(C2, only.values = TRUE)$values) # 6.46   1930
[1]    6.462385 1930.505982
> C3 <- cov(swM, use = "pairwise")
> range(eigen(C3, only.values = TRUE)$values) # 6.19   1938
[1]    6.194469 1938.033663
> 
> ## Kendall's tau doesn't change much:
> symnum(Rc <- cor(swM, method = "kendall", use = "complete"))
       F A Ex Ed C I
Frtlty 1            
Agrclt   1          
Exmntn . . 1        
Eductn . . .  1     
Cathlc     .     1  
Infn.M             1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> symnum(Rp <- cor(swM, method = "kendall", use = "pairwise"))
       F A Ex Ed C I
Frtlty 1            
Agrclt   1          
Exmntn . . 1        
Eductn . . .  1     
Cathlc     .     1  
Infn.M .           1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> symnum(R. <- cor(swiss, method = "kendall"))
                 F A Ex Ed C I
Fertility        1            
Agriculture        1          
Examination      . . 1        
Education        . . .  1     
Catholic             .     1  
Infant.Mortality .           1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> 
> ## "pairwise" is closer componentwise,
> summary(abs(c(1 - Rp/R.)))
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
0.00000 0.00000 0.04481 0.09573 0.15214 0.53941 
> summary(abs(c(1 - Rc/R.)))
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
0.00000 0.02020 0.08482 0.50675 0.16192 7.08509 
> 
> ## but "complete" is closer in terms of the eigenvalues:
> EV <- function(m) eigen(m, only.values=TRUE)$values
> summary(abs(1 - EV(Rp)/EV(R.)) / abs(1 - EV(Rc)/EV(R.)))
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
 0.8942  1.1464  1.2452  1.3732  1.3722  2.3265 
> 
> 
> 
> cleanEx()
> nameEx("cor.test")
> ### * cor.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: cor.test
> ### Title: Test for Association/Correlation Between Paired Samples
> ### Aliases: cor.test cor.test.default cor.test.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Hollander & Wolfe (1973), p. 187f.
> ## Assessment of tuna quality.  We compare the Hunter L measure of
> ##  lightness to the averages of consumer panel scores (recoded as
> ##  integer values from 1 to 6 and averaged over 80 such values) in
> ##  9 lots of canned tuna.
> 
> x <- c(44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1)
> y <- c( 2.6,  3.1,  2.5,  5.0,  3.6,  4.0,  5.2,  2.8,  3.8)
> 
> ##  The alternative hypothesis of interest is that the
> ##  Hunter L value is positively associated with the panel score.
> 
> cor.test(x, y, method = "kendall", alternative = "greater")

	Kendall's rank correlation tau

data:  x and y
T = 26, p-value = 0.05972
alternative hypothesis: true tau is greater than 0
sample estimates:
      tau 
0.4444444 

> ## => p=0.05972
> 
> cor.test(x, y, method = "kendall", alternative = "greater",
+          exact = FALSE) # using large sample approximation

	Kendall's rank correlation tau

data:  x and y
z = 1.6681, p-value = 0.04765
alternative hypothesis: true tau is greater than 0
sample estimates:
      tau 
0.4444444 

> ## => p=0.04765
> 
> ## Compare this to
> cor.test(x, y, method = "spearm", alternative = "g")

	Spearman's rank correlation rho

data:  x and y
S = 48, p-value = 0.0484
alternative hypothesis: true rho is greater than 0
sample estimates:
rho 
0.6 

> cor.test(x, y,                    alternative = "g")

	Pearson's product-moment correlation

data:  x and y
t = 1.8411, df = 7, p-value = 0.05409
alternative hypothesis: true correlation is greater than 0
95 percent confidence interval:
 -0.02223023  1.00000000
sample estimates:
      cor 
0.5711816 

> 
> ## Formula interface.
> require(graphics)
> pairs(USJudgeRatings)
> cor.test(~ CONT + INTG, data = USJudgeRatings)

	Pearson's product-moment correlation

data:  CONT and INTG
t = -0.8605, df = 41, p-value = 0.3945
alternative hypothesis: true correlation is not equal to 0
95 percent confidence interval:
 -0.4168591  0.1741182
sample estimates:
       cor 
-0.1331909 

> 
> 
> 
> cleanEx()
> nameEx("cov.wt")
> ### * cov.wt
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: cov.wt
> ### Title: Weighted Covariance Matrices
> ### Aliases: cov.wt
> ### Keywords: multivariate
> 
> ### ** Examples
> 
>  (xy <- cbind(x = 1:10, y = c(1:3, 8:5, 8:10)))
       x  y
 [1,]  1  1
 [2,]  2  2
 [3,]  3  3
 [4,]  4  8
 [5,]  5  7
 [6,]  6  6
 [7,]  7  5
 [8,]  8  8
 [9,]  9  9
[10,] 10 10
>  w1 <- c(0,0,0,1,1,1,1,1,0,0)
>  cov.wt(xy, wt = w1) # i.e. method = "unbiased"
$cov
     x    y
x  2.5 -0.5
y -0.5  1.7

$center
  x   y 
6.0 6.8 

$n.obs
[1] 10

$wt
 [1] 0.0 0.0 0.0 0.2 0.2 0.2 0.2 0.2 0.0 0.0

>  cov.wt(xy, wt = w1, method = "ML", cor = TRUE)
$cov
     x     y
x  2.0 -0.40
y -0.4  1.36

$center
  x   y 
6.0 6.8 

$n.obs
[1] 10

$wt
 [1] 0.0 0.0 0.0 0.2 0.2 0.2 0.2 0.2 0.0 0.0

$cor
           x          y
x  1.0000000 -0.2425356
y -0.2425356  1.0000000

> 
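> ## A minimal sketch, not run: the "ML" covariance is the weighted
> ## cross-product about the weighted mean (weights normalized to sum 1);
> ## 'w' and 'ctr' are local names:
> ## w <- w1 / sum(w1); ctr <- colSums(w * xy)
> ## crossprod(sqrt(w) * sweep(xy, 2, ctr))   # matches the $cov above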
> 
> 
> cleanEx()
> nameEx("cpgram")
> ### * cpgram
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: cpgram
> ### Title: Plot Cumulative Periodogram
> ### Aliases: cpgram
> ### Keywords: ts hplot
> 
> ### ** Examples
> 
> require(graphics)
> 
> par(pty = "s", mfrow = c(1,2))
> cpgram(lh)
> lh.ar <- ar(lh, order.max = 9)
> cpgram(lh.ar$resid, main = "AR(3) fit to lh")
> 
> cpgram(ldeaths)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("cutree")
> ### * cutree
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: cutree
> ### Title: Cut a Tree into Groups of Data
> ### Aliases: cutree
> ### Keywords: multivariate cluster
> 
> ### ** Examples
> 
> hc <- hclust(dist(USArrests))
> 
> cutree(hc, k = 1:5) #k = 1 is trivial
               1 2 3 4 5
Alabama        1 1 1 1 1
Alaska         1 1 1 1 1
Arizona        1 1 1 1 1
Arkansas       1 2 2 2 2
California     1 1 1 1 1
Colorado       1 2 2 2 2
Connecticut    1 2 3 3 3
Delaware       1 1 1 1 1
Florida        1 1 1 4 4
Georgia        1 2 2 2 2
Hawaii         1 2 3 3 5
Idaho          1 2 3 3 3
Illinois       1 1 1 1 1
Indiana        1 2 3 3 3
Iowa           1 2 3 3 5
Kansas         1 2 3 3 3
Kentucky       1 2 3 3 3
Louisiana      1 1 1 1 1
Maine          1 2 3 3 5
Maryland       1 1 1 1 1
Massachusetts  1 2 2 2 2
Michigan       1 1 1 1 1
Minnesota      1 2 3 3 5
Mississippi    1 1 1 1 1
Missouri       1 2 2 2 2
Montana        1 2 3 3 3
Nebraska       1 2 3 3 3
Nevada         1 1 1 1 1
New Hampshire  1 2 3 3 5
New Jersey     1 2 2 2 2
New Mexico     1 1 1 1 1
New York       1 1 1 1 1
North Carolina 1 1 1 4 4
North Dakota   1 2 3 3 5
Ohio           1 2 3 3 3
Oklahoma       1 2 2 2 2
Oregon         1 2 2 2 2
Pennsylvania   1 2 3 3 3
Rhode Island   1 2 2 2 2
South Carolina 1 1 1 1 1
South Dakota   1 2 3 3 5
Tennessee      1 2 2 2 2
Texas          1 2 2 2 2
Utah           1 2 3 3 3
Vermont        1 2 3 3 5
Virginia       1 2 2 2 2
Washington     1 2 2 2 2
West Virginia  1 2 3 3 5
Wisconsin      1 2 3 3 5
Wyoming        1 2 2 2 2
> cutree(hc, h = 250)
       Alabama         Alaska        Arizona       Arkansas     California 
             1              1              1              2              1 
      Colorado    Connecticut       Delaware        Florida        Georgia 
             2              2              1              1              2 
        Hawaii          Idaho       Illinois        Indiana           Iowa 
             2              2              1              2              2 
        Kansas       Kentucky      Louisiana          Maine       Maryland 
             2              2              1              2              1 
 Massachusetts       Michigan      Minnesota    Mississippi       Missouri 
             2              1              2              1              2 
       Montana       Nebraska         Nevada  New Hampshire     New Jersey 
             2              2              1              2              2 
    New Mexico       New York North Carolina   North Dakota           Ohio 
             1              1              1              2              2 
      Oklahoma         Oregon   Pennsylvania   Rhode Island South Carolina 
             2              2              2              2              1 
  South Dakota      Tennessee          Texas           Utah        Vermont 
             2              2              2              2              2 
      Virginia     Washington  West Virginia      Wisconsin        Wyoming 
             2              2              2              2              2 
> 
> ## Compare the 2-group and 4-group solutions:
> g24 <- cutree(hc, k = c(2,4))
> table(grp2 = g24[,"2"], grp4 = g24[,"4"])
    grp4
grp2  1  2  3  4
   1 14  0  0  2
   2  0 14 20  0
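> ## A minimal sketch, not run: cutting at a height and at the matching
> ## number of groups should give the same partition (here h = 250 <-> k = 2):
> ## all(cutree(hc, h = 250) == cutree(hc, k = 2))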
> 
> 
> 
> cleanEx()
> nameEx("decompose")
> ### * decompose
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: decompose
> ### Title: Classical Seasonal Decomposition by Moving Averages
> ### Aliases: decompose plot.decomposed.ts
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> m <- decompose(co2)
> m$figure
 [1] -0.05359649  0.61055921  1.37564693  2.51682018  3.00028509  2.32921053
 [7]  0.81293860 -1.25052632 -3.05458333 -3.25194079 -2.06969298 -0.96512061
> plot(m)
> 
> ## example taken from Kendall/Stuart
> x <- c(-50, 175, 149, 214, 247, 237, 225, 329, 729, 809,
+        530, 489, 540, 457, 195, 176, 337, 239, 128, 102, 232, 429, 3,
+        98, 43, -141, -77, -13, 125, 361, -45, 184)
> x <- ts(x, start = c(1951, 1), end = c(1958, 4), frequency = 4)
> m <- decompose(x)
> ## seasonal figure: 6.25, 8.62, -8.84, -6.03
> round(decompose(x)$figure / 10, 2)
[1]  6.25  8.62 -8.84 -6.03
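> ## A minimal sketch, not run: for the additive model the components
> ## reassemble the series (NAs at the ends come from the moving average):
> ## range(m$x - (m$trend + m$seasonal + m$random), na.rm = TRUE)  # ~ 0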
> 
> 
> 
> cleanEx()
> nameEx("delete.response")
> ### * delete.response
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: delete.response
> ### Title: Modify Terms Objects
> ### Aliases: reformulate drop.terms delete.response [.terms
> ### Keywords: programming
> 
> ### ** Examples
> 
> ff <- y ~ z + x + w
> tt <- terms(ff)
> tt
y ~ z + x + w
attr(,"variables")
list(y, z, x, w)
attr(,"factors")
  z x w
y 0 0 0
z 1 0 0
x 0 1 0
w 0 0 1
attr(,"term.labels")
[1] "z" "x" "w"
attr(,"order")
[1] 1 1 1
attr(,"intercept")
[1] 1
attr(,"response")
[1] 1
attr(,".Environment")
<environment: R_GlobalEnv>
> delete.response(tt)
~z + x + w
attr(,"variables")
list(z, x, w)
attr(,"factors")
  z x w
z 1 0 0
x 0 1 0
w 0 0 1
attr(,"term.labels")
[1] "z" "x" "w"
attr(,"order")
[1] 1 1 1
attr(,"intercept")
[1] 1
attr(,"response")
[1] 0
attr(,".Environment")
<environment: R_GlobalEnv>
> drop.terms(tt, 2:3, keep.response = TRUE)
y ~ z
attr(,"variables")
list(y, z)
attr(,"factors")
  z
y 0
z 1
attr(,"term.labels")
[1] "z"
attr(,"order")
[1] 1
attr(,"intercept")
[1] 1
attr(,"response")
[1] 1
attr(,".Environment")
<environment: R_GlobalEnv>
> tt[-1]
y ~ x + w
attr(,"variables")
list(y, x, w)
attr(,"factors")
  x w
y 0 0
x 1 0
w 0 1
attr(,"term.labels")
[1] "x" "w"
attr(,"order")
[1] 1 1
attr(,"intercept")
[1] 1
attr(,"response")
[1] 1
attr(,".Environment")
<environment: R_GlobalEnv>
> tt[2:3]
y ~ x + w
attr(,"variables")
list(y, x, w)
attr(,"factors")
  x w
y 0 0
x 1 0
w 0 1
attr(,"term.labels")
[1] "x" "w"
attr(,"order")
[1] 1 1
attr(,"intercept")
[1] 1
attr(,"response")
[1] 1
attr(,".Environment")
<environment: R_GlobalEnv>
> reformulate(attr(tt, "term.labels"))
~z + x + w
> 
> ## keep LHS :
> reformulate("x*w", ff[[2]])
y ~ x * w
> fS <- surv(ft, case) ~ a + b
> reformulate(c("a", "b*f"), fS[[2]])
surv(ft, case) ~ a + b * f
> 
> ## using non-syntactic names:
> reformulate(c("`P/E`", "`% Growth`"), response = as.name("+-"))
`+-` ~ `P/E` + `% Growth`
> 
> x <- c("a name", "another name")
> tryCatch( reformulate(x), error = function(e) "Syntax error." )
[1] "Syntax error."
> ## instead, backquote the strings in x :
> reformulate(sprintf("`%s`", x))
~`a name` + `another name`
> 
> stopifnot(identical(      ~ var, reformulate("var")),
+           identical(~ a + b + c, reformulate(letters[1:3])),
+           identical(  y ~ a + b, reformulate(letters[1:2], "y"))
+          )
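> ## A minimal sketch, not run: a common idiom is rebuilding a formula
> ## without one predictor ('tl' is a local name):
> ## tl <- attr(tt, "term.labels")
> ## reformulate(setdiff(tl, "x"), response = "y")   # y ~ z + w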
> 
> 
> 
> cleanEx()
> nameEx("dendrapply")
> ### * dendrapply
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: dendrapply
> ### Title: Apply a Function to All Nodes of a Dendrogram
> ### Aliases: dendrapply
> ### Keywords: iteration
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## a smallish simple dendrogram
> dhc <- as.dendrogram(hc <- hclust(dist(USArrests), "ave"))
> (dhc21 <- dhc[[2]][[1]])
'dendrogram' with 2 branches and 14 members total, at height 44.83793 
> 
> ## too simple:
> dendrapply(dhc21, function(n) utils::str(attributes(n)))
List of 4
 $ members : int 14
 $ midpoint: num 6.8
 $ height  : num 44.8
 $ class   : chr "dendrogram"
List of 4
 $ members : int 8
 $ midpoint: num 3.34
 $ height  : num 26.7
 $ class   : chr "dendrogram"
List of 4
 $ members : int 5
 $ midpoint: num 0.938
 $ height  : num 16.4
 $ class   : chr "dendrogram"
List of 5
 $ members: int 1
 $ height : num 0
 $ label  : chr "Washington"
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 4
 $ members : int 4
 $ midpoint: num 0.875
 $ height  : num 12.9
 $ class   : chr "dendrogram"
List of 5
 $ members: int 1
 $ height : num 0
 $ label  : chr "Oregon"
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 4
 $ members : int 3
 $ midpoint: num 0.75
 $ height  : num 10.7
 $ class   : chr "dendrogram"
List of 5
 $ members: int 1
 $ height : num 0
 $ label  : chr "Wyoming"
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 4
 $ members : int 2
 $ midpoint: num 0.5
 $ height  : num 7.36
 $ class   : chr "dendrogram"
List of 5
 $ label  : chr "Oklahoma"
 $ members: int 1
 $ height : num 0
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 5
 $ label  : chr "Virginia"
 $ members: int 1
 $ height : num 0
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 4
 $ members : int 3
 $ midpoint: num 0.75
 $ height  : num 22.6
 $ class   : chr "dendrogram"
List of 5
 $ members: int 1
 $ height : num 0
 $ label  : chr "Rhode Island"
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 4
 $ members : int 2
 $ midpoint: num 0.5
 $ height  : num 11.5
 $ class   : chr "dendrogram"
List of 5
 $ label  : chr "Massachusetts"
 $ members: int 1
 $ height : num 0
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 5
 $ label  : chr "New Jersey"
 $ members: int 1
 $ height : num 0
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 4
 $ members : int 6
 $ midpoint: num 2.25
 $ height  : num 29.1
 $ class   : chr "dendrogram"
List of 4
 $ members : int 3
 $ midpoint: num 0.75
 $ height  : num 20.2
 $ class   : chr "dendrogram"
List of 5
 $ members: int 1
 $ height : num 0
 $ label  : chr "Missouri"
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 4
 $ members : int 2
 $ midpoint: num 0.5
 $ height  : num 12.6
 $ class   : chr "dendrogram"
List of 5
 $ label  : chr "Arkansas"
 $ members: int 1
 $ height : num 0
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 5
 $ label  : chr "Tennessee"
 $ members: int 1
 $ height : num 0
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 4
 $ members : int 3
 $ midpoint: num 0.75
 $ height  : num 24
 $ class   : chr "dendrogram"
List of 5
 $ members: int 1
 $ height : num 0
 $ label  : chr "Georgia"
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 4
 $ members : int 2
 $ midpoint: num 0.5
 $ height  : num 14.5
 $ class   : chr "dendrogram"
List of 5
 $ label  : chr "Colorado"
 $ members: int 1
 $ height : num 0
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
List of 5
 $ label  : chr "Texas"
 $ members: int 1
 $ height : num 0
 $ leaf   : logi TRUE
 $ class  : chr "dendrogram"
[[1]]
[[1]][[1]]
[[1]][[1]][[1]]
NULL

[[1]][[1]][[2]]
[[1]][[1]][[2]][[1]]
NULL

[[1]][[1]][[2]][[2]]
[[1]][[1]][[2]][[2]][[1]]
NULL

[[1]][[1]][[2]][[2]][[2]]
[[1]][[1]][[2]][[2]][[2]][[1]]
NULL

[[1]][[1]][[2]][[2]][[2]][[2]]
NULL





[[1]][[2]]
[[1]][[2]][[1]]
NULL

[[1]][[2]][[2]]
[[1]][[2]][[2]][[1]]
NULL

[[1]][[2]][[2]][[2]]
NULL




[[2]]
[[2]][[1]]
[[2]][[1]][[1]]
NULL

[[2]][[1]][[2]]
[[2]][[1]][[2]][[1]]
NULL

[[2]][[1]][[2]][[2]]
NULL



[[2]][[2]]
[[2]][[2]][[1]]
NULL

[[2]][[2]][[2]]
[[2]][[2]][[2]][[1]]
NULL

[[2]][[2]][[2]][[2]]
NULL




> 
> ## toy example to set colored leaf labels :
> local({
+   colLab <<- function(n) {
+       if(is.leaf(n)) {
+         a <- attributes(n)
+         i <<- i+1
+         attr(n, "nodePar") <-
+             c(a$nodePar, list(lab.col = mycols[i], lab.font = i%%3))
+       }
+       n
+   }
+   mycols <- grDevices::rainbow(attr(dhc21,"members"))
+   i <- 0
+  })
> dL <- dendrapply(dhc21, colLab)
> op <- par(mfrow = 2:1)
>  plot(dhc21)
>  plot(dL) ## --> colored labels!
> par(op)
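> ## A minimal sketch, not run: a similar traversal can collect all leaf
> ## labels (the same set that labels(dhc21) returns); 'labs' is local:
> ## labs <- character(0)
> ## invisible(dendrapply(dhc21, function(n) {
> ##     if (is.leaf(n)) labs <<- c(labs, attr(n, "label")); n }))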
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("dendrogram")
> ### * dendrogram
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: dendrogram
> ### Title: General Tree Structures
> ### Aliases: dendrogram as.dendrogram as.dendrogram.dendrogram
> ###   as.dendrogram.hclust as.hclust.dendrogram cut.dendrogram
> ###   [[.dendrogram merge.dendrogram nobs.dendrogram plot.dendrogram
> ###   print.dendrogram rev.dendrogram str.dendrogram is.leaf
> ### Keywords: multivariate tree hplot
> 
> ### ** Examples
> 
> require(graphics); require(utils)
> 
> hc <- hclust(dist(USArrests), "ave")
> (dend1 <- as.dendrogram(hc)) # "print()" method
'dendrogram' with 2 branches and 50 members total, at height 152.314 
> str(dend1)          # "str()" method
--[dendrogram w/ 2 branches and 50 members at h = 152]
  |--[dendrogram w/ 2 branches and 16 members at h = 77.6]
  |  |--[dendrogram w/ 2 branches and 2 members at h = 38.5]
  |  |  |--leaf "Florida" 
  |  |  `--leaf "North Carolina" 
  |  `--[dendrogram w/ 2 branches and 14 members at h = 44.3]
  |     |--[dendrogram w/ 2 branches and 4 members at h = 28]
  |     |  |--leaf "California" 
  |     |  `--[dendrogram w/ 2 branches and 3 members at h = 15.5]
  |     |     |--leaf "Maryland" 
  |     |     `--[dendrogram w/ 2 branches and 2 members at h = 13.9]
  |     |        |--leaf "Arizona" 
  |     |        `--leaf "New Mexico" 
  |     `--[dendrogram w/ 2 branches and 10 members at h = 39.4]
  |        |--[dendrogram w/ 2 branches and 7 members at h = 26.4]
  |        |  |--[dendrogram w/ 2 branches and 3 members at h = 16.9]
  |        |  |  |--leaf "Delaware" 
  |        |  |  `--[dendrogram w/ 2 branches and 2 members at h = 15.5]
  |        |  |     |--leaf "Alabama" 
  |        |  |     `--leaf "Louisiana" 
  |        |  `--[dendrogram w/ 2 branches and 4 members at h = 18.4]
  |        |     |--[dendrogram w/ 2 branches and 2 members at h = 6.24]
  |        |     |  |--leaf "Illinois" 
  |        |     |  `--leaf "New York" 
  |        |     `--[dendrogram w/ 2 branches and 2 members at h = 13.3]
  |        |        |--leaf "Michigan" 
  |        |        `--leaf "Nevada" 
  |        `--[dendrogram w/ 2 branches and 3 members at h = 28.1]
  |           |--leaf "Alaska" 
  |           `--[dendrogram w/ 2 branches and 2 members at h = 21.2]
  |              |--leaf "Mississippi" 
  |              `--leaf "South Carolina" 
  `--[dendrogram w/ 2 branches and 34 members at h = 89.2]
     |--[dendrogram w/ 2 branches and 14 members at h = 44.8]
     |  |--[dendrogram w/ 2 branches and 8 members at h = 26.7]
     |  |  |--[dendrogram w/ 2 branches and 5 members at h = 16.4]
     |  |  |  |--leaf "Washington" 
     |  |  |  `--[dendrogram w/ 2 branches and 4 members at h = 12.9]
     |  |  |     |--leaf "Oregon" 
     |  |  |     `--[dendrogram w/ 2 branches and 3 members at h = 10.7]
     |  |  |        |--leaf "Wyoming" 
     |  |  |        `--[dendrogram w/ 2 branches and 2 members at h = 7.36]
     |  |  |           |--leaf "Oklahoma" 
     |  |  |           `--leaf "Virginia" 
     |  |  `--[dendrogram w/ 2 branches and 3 members at h = 22.6]
     |  |     |--leaf "Rhode Island" 
     |  |     `--[dendrogram w/ 2 branches and 2 members at h = 11.5]
     |  |        |--leaf "Massachusetts" 
     |  |        `--leaf "New Jersey" 
     |  `--[dendrogram w/ 2 branches and 6 members at h = 29.1]
     |     |--[dendrogram w/ 2 branches and 3 members at h = 20.2]
     |     |  |--leaf "Missouri" 
     |     |  `--[dendrogram w/ 2 branches and 2 members at h = 12.6]
     |     |     |--leaf "Arkansas" 
     |     |     `--leaf "Tennessee" 
     |     `--[dendrogram w/ 2 branches and 3 members at h = 24]
     |        |--leaf "Georgia" 
     |        `--[dendrogram w/ 2 branches and 2 members at h = 14.5]
     |           |--leaf "Colorado" 
     |           `--leaf "Texas" 
     `--[dendrogram w/ 2 branches and 20 members at h = 54.7]
        |--[dendrogram w/ 2 branches and 10 members at h = 20.6]
        |  |--[dendrogram w/ 2 branches and 4 members at h = 15]
        |  |  |--leaf "Idaho" 
        |  |  `--[dendrogram w/ 2 branches and 3 members at h = 12.4]
        |  |     |--leaf "Nebraska" 
        |  |     `--[dendrogram w/ 2 branches and 2 members at h = 3.83]
        |  |        |--leaf "Kentucky" 
        |  |        `--leaf "Montana" 
        |  `--[dendrogram w/ 2 branches and 6 members at h = 15.1]
        |     |--[dendrogram w/ 2 branches and 2 members at h = 6.64]
        |     |  |--leaf "Ohio" 
        |     |  `--leaf "Utah" 
        |     `--[dendrogram w/ 2 branches and 4 members at h = 13.4]
        |        |--[dendrogram w/ 2 branches and 2 members at h = 3.93]
        |        |  |--leaf "Indiana" 
        |        |  `--leaf "Kansas" 
        |        `--[dendrogram w/ 2 branches and 2 members at h = 8.03]
        |           |--leaf "Connecticut" 
        |           `--leaf "Pennsylvania" 
        `--[dendrogram w/ 2 branches and 10 members at h = 41.1]
           |--leaf "Hawaii" 
           `--[dendrogram w/ 2 branches and 9 members at h = 33.1]
              |--[dendrogram w/ 2 branches and 3 members at h = 10.8]
              |  |--leaf "West Virginia" 
              |  `--[dendrogram w/ 2 branches and 2 members at h = 8.54]
              |     |--leaf "Maine" 
              |     `--leaf "South Dakota" 
              `--[dendrogram w/ 2 branches and 6 members at h = 27.8]
                 |--[dendrogram w/ 2 branches and 2 members at h = 13]
                 |  |--leaf "North Dakota" 
                 |  `--leaf "Vermont" 
                 `--[dendrogram w/ 2 branches and 4 members at h = 19]
                    |--leaf "Minnesota" 
                    `--[dendrogram w/ 2 branches and 3 members at h = 10.2]
                       |--leaf "Wisconsin" 
                       `--[dendrogram w/ 2 branches and 2 members at h = 2.29]
                          |--leaf "Iowa" 
                          `--leaf "New Hampshire" 
> str(dend1, max.level = 2, last.str =  "'") # only the first two sub-levels
--[dendrogram w/ 2 branches and 50 members at h = 152]
  |--[dendrogram w/ 2 branches and 16 members at h = 77.6]
  |  |--[dendrogram w/ 2 branches and 2 members at h = 38.5] ..
  |  '--[dendrogram w/ 2 branches and 14 members at h = 44.3] ..
  '--[dendrogram w/ 2 branches and 34 members at h = 89.2]
     |--[dendrogram w/ 2 branches and 14 members at h = 44.8] ..
     '--[dendrogram w/ 2 branches and 20 members at h = 54.7] ..
> oo <- options(str.dendrogram.last = "\\") # yet another possibility
> str(dend1, max.level = 2) # only the first two sub-levels
--[dendrogram w/ 2 branches and 50 members at h = 152]
  |--[dendrogram w/ 2 branches and 16 members at h = 77.6]
  |  |--[dendrogram w/ 2 branches and 2 members at h = 38.5] ..
  |  --[dendrogram w/ 2 branches and 14 members at h = 44.3] ..
  --[dendrogram w/ 2 branches and 34 members at h = 89.2]
     |--[dendrogram w/ 2 branches and 14 members at h = 44.8] ..
     --[dendrogram w/ 2 branches and 20 members at h = 54.7] ..
> options(oo)  # .. resetting them
> 
> op <- par(mfrow =  c(2,2), mar = c(5,2,1,4))
> plot(dend1)
> ## "triangle" type and show inner nodes:
> plot(dend1, nodePar = list(pch = c(1,NA), cex = 0.8, lab.cex = 0.8),
+       type = "t", center = TRUE)
> plot(dend1, edgePar = list(col = 1:2, lty = 2:3),
+      dLeaf = 1, edge.root = TRUE)
> plot(dend1, nodePar = list(pch = 2:1, cex = .4*2:1, col = 2:3),
+      horiz = TRUE)
> 
> ## simple test for as.hclust() as the inverse of as.dendrogram():
> stopifnot(identical(as.hclust(dend1)[1:4], hc[1:4]))
> 
> dend2 <- cut(dend1, h = 70)
> ## leaves are wrong horizontally in R 4.0 and earlier:
> plot(dend2$upper)
> plot(dend2$upper, nodePar = list(pch = c(1,7), col = 2:1))
> ##  dend2$lower is *NOT* a dendrogram, but a list of dendrograms :
> plot(dend2$lower[[3]], nodePar = list(col = 4), horiz = TRUE, type = "tr")
> ## "inner" and "leaf" edges in different type & color :
> plot(dend2$lower[[2]], nodePar = list(col = 1),   # non empty list
+      edgePar = list(lty = 1:2, col = 2:1), edge.root = TRUE)
> par(op)
> d3 <- dend2$lower[[2]][[2]][[1]]
> stopifnot(identical(d3, dend2$lower[[2]][[c(2,1)]]))
> str(d3, last.str = "'")
--[dendrogram w/ 2 branches and 7 members at h = 26.4]
  |--[dendrogram w/ 2 branches and 3 members at h = 16.9]
  |  |--leaf "Delaware" 
  |  '--[dendrogram w/ 2 branches and 2 members at h = 15.5]
  |     |--leaf "Alabama" 
  |     '--leaf "Louisiana" 
  '--[dendrogram w/ 2 branches and 4 members at h = 18.4]
     |--[dendrogram w/ 2 branches and 2 members at h = 6.24]
     |  |--leaf "Illinois" 
     |  '--leaf "New York" 
     '--[dendrogram w/ 2 branches and 2 members at h = 13.3]
        |--leaf "Michigan" 
        '--leaf "Nevada" 
> 
> ## to peek at the inner structure "if you must", use '[..]' indexing :
> str(d3[2][[1]]) ## or the full structure, see str(d3[]) below
List of 2
 $ :List of 2
  ..$ : int 13
  .. ..- attr(*, "label")= chr "Illinois"
  .. ..- attr(*, "members")= int 1
  .. ..- attr(*, "height")= num 0
  .. ..- attr(*, "leaf")= logi TRUE
  ..$ : int 32
  .. ..- attr(*, "label")= chr "New York"
  .. ..- attr(*, "members")= int 1
  .. ..- attr(*, "height")= num 0
  .. ..- attr(*, "leaf")= logi TRUE
  ..- attr(*, "members")= int 2
  ..- attr(*, "midpoint")= num 0.5
  ..- attr(*, "height")= num 6.24
 $ :List of 2
  ..$ : int 22
  .. ..- attr(*, "label")= chr "Michigan"
  .. ..- attr(*, "members")= int 1
  .. ..- attr(*, "height")= num 0
  .. ..- attr(*, "leaf")= logi TRUE
  ..$ : int 28
  .. ..- attr(*, "label")= chr "Nevada"
  .. ..- attr(*, "members")= int 1
  .. ..- attr(*, "height")= num 0
  .. ..- attr(*, "leaf")= logi TRUE
  ..- attr(*, "members")= int 2
  ..- attr(*, "midpoint")= num 0.5
  ..- attr(*, "height")= num 13.3
 - attr(*, "members")= int 4
 - attr(*, "midpoint")= num 1.5
 - attr(*, "height")= num 18.4
> str(d3[])
List of 2
 $ :List of 2
  ..$ : int 8
  .. ..- attr(*, "members")= int 1
  .. ..- attr(*, "height")= num 0
  .. ..- attr(*, "label")= chr "Delaware"
  .. ..- attr(*, "leaf")= logi TRUE
  ..$ :List of 2
  .. ..$ : int 1
  .. .. ..- attr(*, "label")= chr "Alabama"
  .. .. ..- attr(*, "members")= int 1
  .. .. ..- attr(*, "height")= num 0
  .. .. ..- attr(*, "leaf")= logi TRUE
  .. ..$ : int 18
  .. .. ..- attr(*, "label")= chr "Louisiana"
  .. .. ..- attr(*, "members")= int 1
  .. .. ..- attr(*, "height")= num 0
  .. .. ..- attr(*, "leaf")= logi TRUE
  .. ..- attr(*, "members")= int 2
  .. ..- attr(*, "midpoint")= num 0.5
  .. ..- attr(*, "height")= num 15.5
  ..- attr(*, "members")= int 3
  ..- attr(*, "midpoint")= num 0.75
  ..- attr(*, "height")= num 16.9
 $ :List of 2
  ..$ :List of 2
  .. ..$ : int 13
  .. .. ..- attr(*, "label")= chr "Illinois"
  .. .. ..- attr(*, "members")= int 1
  .. .. ..- attr(*, "height")= num 0
  .. .. ..- attr(*, "leaf")= logi TRUE
  .. ..$ : int 32
  .. .. ..- attr(*, "label")= chr "New York"
  .. .. ..- attr(*, "members")= int 1
  .. .. ..- attr(*, "height")= num 0
  .. .. ..- attr(*, "leaf")= logi TRUE
  .. ..- attr(*, "members")= int 2
  .. ..- attr(*, "midpoint")= num 0.5
  .. ..- attr(*, "height")= num 6.24
  ..$ :List of 2
  .. ..$ : int 22
  .. .. ..- attr(*, "label")= chr "Michigan"
  .. .. ..- attr(*, "members")= int 1
  .. .. ..- attr(*, "height")= num 0
  .. .. ..- attr(*, "leaf")= logi TRUE
  .. ..$ : int 28
  .. .. ..- attr(*, "label")= chr "Nevada"
  .. .. ..- attr(*, "members")= int 1
  .. .. ..- attr(*, "height")= num 0
  .. .. ..- attr(*, "leaf")= logi TRUE
  .. ..- attr(*, "members")= int 2
  .. ..- attr(*, "midpoint")= num 0.5
  .. ..- attr(*, "height")= num 13.3
  ..- attr(*, "members")= int 4
  ..- attr(*, "midpoint")= num 1.5
  ..- attr(*, "height")= num 18.4
 - attr(*, "members")= int 7
 - attr(*, "midpoint")= num 2.62
 - attr(*, "height")= num 26.4
> 
> ## merge() to join dendrograms:
> (d13 <- merge(dend2$lower[[1]], dend2$lower[[3]]))
'dendrogram' with 2 branches and 16 members total, at height 49.32173 
> ## merge() all parts back (using default 'height' instead of original one):
> den.1 <- Reduce(merge, dend2$lower)
> ## or merge() all four parts at same height --> 4 branches (!)
> d. <- merge(dend2$lower[[1]], dend2$lower[[2]], dend2$lower[[3]],
+             dend2$lower[[4]])
> ## (with a warning) or the same using  do.call :
> stopifnot(identical(d., do.call(merge, dend2$lower)))
> plot(d., main = "merge(d1, d2, d3, d4)  |->  dendrogram with a 4-split")
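> ## quick check (a sketch; silent if it holds): the 4-way merge has
> ## 4 branches and all 50 members of the original tree :
> stopifnot(length(d.) == 4, attr(d., "members") == 50)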
> 
> ## "Zoom" in to the first dendrogram :
> plot(dend1, xlim = c(1,20), ylim = c(1,50))
> 
> nP <- list(col = 3:2, cex = c(2.0, 0.75), pch =  21:22,
+            bg =  c("light blue", "pink"),
+            lab.cex = 0.75, lab.col = "tomato")
> plot(d3, nodePar= nP, edgePar = list(col = "gray", lwd = 2), horiz = TRUE)
> addE <- function(n) {
+       if(!is.leaf(n)) {
+         attr(n, "edgePar") <- list(p.col = "plum")
+         attr(n, "edgetext") <- paste(attr(n,"members"),"members")
+       }
+       n
+ }
> d3e <- dendrapply(d3, addE)
> plot(d3e, nodePar =  nP)
> plot(d3e, nodePar =  nP, leaflab = "textlike")
> 
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("density")
> ### * density
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: density
> ### Title: Kernel Density Estimation
> ### Aliases: density density.default
> ### Keywords: distribution smooth
> 
> ### ** Examples
> 
> require(graphics)
> 
> plot(density(c(-20, rep(0,98), 20)), xlim = c(-4, 4))  # IQR = 0
> 
> # The Old Faithful geyser data
> d <- density(faithful$eruptions, bw = "sj")
> d

Call:
	density.default(x = faithful$eruptions, bw = "sj")

Data: faithful$eruptions (272 obs.);	Bandwidth 'bw' = 0.14

       x               y            
 Min.   :1.180   Min.   :0.0001814  
 1st Qu.:2.265   1st Qu.:0.0421820  
 Median :3.350   Median :0.1706817  
 Mean   :3.350   Mean   :0.2299476  
 3rd Qu.:4.435   3rd Qu.:0.4130131  
 Max.   :5.520   Max.   :0.5940669  
> plot(d)
> 
> plot(d, type = "n")
> polygon(d, col = "wheat")
> 
> ## Missing values:
> x <- xx <- faithful$eruptions
> x[i.out <- sample(length(x), 10)] <- NA
> doR <- density(x, bw = 0.15, na.rm = TRUE)
> lines(doR, col = "blue")
> points(xx[i.out], rep(0.01, 10))
> 
> ## Weighted observations:
> fe <- sort(faithful$eruptions) # has quite a few non-unique values
> ## use 'counts / n' as weights:
> dw <- density(unique(fe), weights = table(fe)/length(fe), bw = d$bw)
> utils::str(dw) ## smaller n: only 126, but identical estimate:
List of 8
 $ x         : num [1:512] 1.18 1.19 1.2 1.21 1.21 ...
 $ y         : num [1:512] 0.000181 0.000221 0.000267 0.000325 0.000393 ...
 $ bw        : num 0.14
 $ n         : int 126
 $ old.coords: logi FALSE
 $ call      : language density.default(x = unique(fe), bw = d$bw, weights = table(fe)/length(fe))
 $ data.name : chr "unique(fe)"
 $ has.na    : logi FALSE
 - attr(*, "class")= chr "density"
> stopifnot(all.equal(d[1:3], dw[1:3]))
> 
> ## simulation from a density() fit:
> # a kernel density fit is an equally-weighted mixture.
> fit <- density(xx)
> N <- 1e6
> x.new <- rnorm(N, sample(xx, size = N, replace = TRUE), fit$bw)
> plot(fit)
> lines(density(x.new), col = "blue")
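> ## moment check (a sketch, with loose tolerances for Monte-Carlo error):
> ## the mixture has mean mean(xx) and variance about var(xx) + fit$bw^2 :
> stopifnot(all.equal(mean(x.new), mean(xx), tolerance = 0.01),
+           all.equal(var(x.new), var(xx) + fit$bw^2, tolerance = 0.02))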
> 
> 
> ## The available kernels:
> (kernels <- eval(formals(density.default)$kernel))
[1] "gaussian"     "epanechnikov" "rectangular"  "triangular"   "biweight"    
[6] "cosine"       "optcosine"   
> 
> ## show the kernels in the R parametrization
> plot (density(0, bw = 1), xlab = "",
+       main = "R's density() kernels with bw = 1")
> for(i in 2:length(kernels))
+    lines(density(0, bw = 1, kernel =  kernels[i]), col = i)
> legend(1.5,.4, legend = kernels, col = seq(kernels),
+        lty = 1, cex = .8, y.intersp = 1)
> 
> ## show the kernels in the S parametrization
> plot(density(0, from = -1.2, to = 1.2, width = 2, kernel = "gaussian"),
+      type = "l", ylim = c(0, 1), xlab = "",
+      main = "R's density() kernels with width = 1")
> for(i in 2:length(kernels))
+    lines(density(0, width = 2, kernel =  kernels[i]), col = i)
> legend(0.6, 1.0, legend = kernels, col = seq(kernels), lty = 1)
> 
> ##-------- Semi-advanced theoretic from here on -------------
> 
> ## Explore the old.coords TRUE --> FALSE change:
> set.seed(7); x <- runif(2^12) # N = 4096
> den  <- density(x) # -> grid of n = 512 points
> den0 <- density(x, old.coords = TRUE)
> summary(den0$y / den$y) # 1.001 ... 1.011
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
  1.001   1.001   1.001   1.002   1.001   1.011 
> summary(    den0$y / den$y - 1) # ~= 1/(2n-2)
     Min.   1st Qu.    Median      Mean   3rd Qu.      Max. 
0.0006768 0.0009353 0.0009784 0.0017606 0.0010349 0.0107757 
> summary(1/ (den0$y / den$y - 1))# ~=    2n-2 = 1022
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
   92.8   966.3  1022.1   918.9  1069.2  1477.6 
> corr0 <- 1 - 1/(2*512-2) # 1 - 1/(2n-2)
> all.equal(den$y, den0$y * corr0)# ~ 0.0001
[1] "Mean relative difference: 9.480177e-05"
> plot(den$x, (den0$y - den$y)/den$y, type='o', cex=1/4)
> title("relative error of density(runif(2^12), old.coords=TRUE)")
> abline(h = 1/1022, v = range(x), lty=2); axis(2, at=1/1022, "1/(2n-2)", las=1)
> 
> 
> ## The R[K] for our kernels:
> (RKs <- cbind(sapply(kernels,
+                      function(k) density(kernel = k, give.Rkern = TRUE))))
                  [,1]
gaussian     0.2820948
epanechnikov 0.2683282
rectangular  0.2886751
triangular   0.2721655
biweight     0.2699746
cosine       0.2711340
optcosine    0.2684756
> 100*round(RKs["epanechnikov",]/RKs, 4) ## Efficiencies
               [,1]
gaussian      95.12
epanechnikov 100.00
rectangular   92.95
triangular    98.59
biweight      99.39
cosine        98.97
optcosine     99.95
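> ## closed form (sketch): for the gaussian kernel R(K) = 1/(2*sqrt(pi)) :
> stopifnot(all.equal(RKs["gaussian", 1], 1/(2*sqrt(pi))))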
> 
> bw <- bw.SJ(precip) ## sensible automatic choice
> plot(density(precip, bw = bw),
+      main = "same sd bandwidths, 7 different kernels")
> for(i in 2:length(kernels))
+    lines(density(precip, bw = bw, kernel = kernels[i]), col = i)
> 
> ## Bandwidth Adjustment for "Exactly Equivalent Kernels"
> h.f <- sapply(kernels, function(k)density(kernel = k, give.Rkern = TRUE))
> (h.f <- (h.f["gaussian"] / h.f)^ .2)
    gaussian epanechnikov  rectangular   triangular     biweight       cosine 
   1.0000000    1.0100567    0.9953989    1.0071923    1.0088217    1.0079575 
   optcosine 
   1.0099458 
> ## -> 1, 1.01, .995, 1.007, ... all close to 1 => the adjustment is barely visible
> 
> plot(density(precip, bw = bw),
+      main = "equivalent bandwidths, 7 different kernels")
> for(i in 2:length(kernels))
+    lines(density(precip, bw = bw, adjust = h.f[i], kernel = kernels[i]),
+          col = i)
> legend(55, 0.035, legend = kernels, col = seq(kernels), lty = 1)
> 
> 
> 
> cleanEx()
> nameEx("deriv")
> ### * deriv
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: deriv
> ### Title: Symbolic and Algorithmic Derivatives of Simple Expressions
> ### Aliases: D deriv deriv.default deriv.formula deriv3 deriv3.default
> ###   deriv3.formula
> ### Keywords: math nonlinear
> 
> ### ** Examples
> 
> ## formula argument :
> dx2x <- deriv(~ x^2, "x") ; dx2x
expression({
    .value <- x^2
    .grad <- array(0, c(length(.value), 1L), list(NULL, c("x")))
    .grad[, "x"] <- 2 * x
    attr(.value, "gradient") <- .grad
    .value
})
> ## Not run: 
> ##D expression({
> ##D          .value <- x^2
> ##D          .grad <- array(0, c(length(.value), 1), list(NULL, c("x")))
> ##D          .grad[, "x"] <- 2 * x
> ##D          attr(.value, "gradient") <- .grad
> ##D          .value
> ##D })
> ## End(Not run)
> mode(dx2x)
[1] "expression"
> x <- -1:2
> eval(dx2x)
[1] 1 0 1 4
attr(,"gradient")
      x
[1,] -2
[2,]  0
[3,]  2
[4,]  4
> 
> ## Something 'tougher':
> trig.exp <- expression(sin(cos(x + y^2)))
> ( D.sc <- D(trig.exp, "x") )
-(cos(cos(x + y^2)) * sin(x + y^2))
> all.equal(D(trig.exp[[1]], "x"), D.sc)
[1] TRUE
> 
> ( dxy <- deriv(trig.exp, c("x", "y")) )
expression({
    .expr2 <- x + y^2
    .expr3 <- cos(.expr2)
    .expr5 <- cos(.expr3)
    .expr6 <- sin(.expr2)
    .value <- sin(.expr3)
    .grad <- array(0, c(length(.value), 2L), list(NULL, c("x", 
        "y")))
    .grad[, "x"] <- -(.expr5 * .expr6)
    .grad[, "y"] <- -(.expr5 * (.expr6 * (2 * y)))
    attr(.value, "gradient") <- .grad
    .value
})
> y <- 1
> eval(dxy)
[1]  0.8414710  0.5143953 -0.4042392 -0.8360219
attr(,"gradient")
              x         y
[1,]  0.0000000  0.000000
[2,] -0.7216061 -1.443212
[3,] -0.8316919 -1.663384
[4,] -0.0774320 -0.154864
> eval(D.sc)
[1]  0.0000000 -0.7216061 -0.8316919 -0.0774320
> 
> ## function returned:
> deriv((y ~ sin(cos(x) * y)), c("x","y"), function.arg = TRUE)
function (x, y) 
{
    .expr1 <- cos(x)
    .expr2 <- .expr1 * y
    .expr4 <- cos(.expr2)
    .value <- sin(.expr2)
    .grad <- array(0, c(length(.value), 2L), list(NULL, c("x", 
        "y")))
    .grad[, "x"] <- -(.expr4 * (sin(x) * y))
    .grad[, "y"] <- .expr4 * .expr1
    attr(.value, "gradient") <- .grad
    .value
}
> 
> ## function with defaulted arguments:
> (fx <- deriv(y ~ b0 + b1 * 2^(-x/th), c("b0", "b1", "th"),
+              function(b0, b1, th, x = 1:7){} ) )
function (b0, b1, th, x = 1:7) 
{
    .expr3 <- 2^(-x/th)
    .value <- b0 + b1 * .expr3
    .grad <- array(0, c(length(.value), 3L), list(NULL, c("b0", 
        "b1", "th")))
    .grad[, "b0"] <- 1
    .grad[, "b1"] <- .expr3
    .grad[, "th"] <- b1 * (.expr3 * (log(2) * (x/th^2)))
    attr(.value, "gradient") <- .grad
    .value
}
> fx(2, 3, 4)
[1] 4.522689 4.121320 3.783811 3.500000 3.261345 3.060660 2.891905
attr(,"gradient")
     b0        b1        th
[1,]  1 0.8408964 0.1092872
[2,]  1 0.7071068 0.1837984
[3,]  1 0.5946036 0.2318331
[4,]  1 0.5000000 0.2599302
[5,]  1 0.4204482 0.2732180
[6,]  1 0.3535534 0.2756976
[7,]  1 0.2973018 0.2704720
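> ## numeric check (a sketch; 'eps' is an arbitrary step size):
> ## the "th" gradient column at x = 1 agrees with a central difference :
> eps <- 1e-6
> g <- attr(fx(2, 3, 4), "gradient")
> stopifnot(all.equal(g[1, "th"],
+                     (fx(2, 3, 4 + eps)[1] - fx(2, 3, 4 - eps)[1]) / (2*eps),
+                     tolerance = 1e-4))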
> 
> ## First derivative
> 
> D(expression(x^2), "x")
2 * x
> stopifnot(D(as.name("x"), "x") == 1)
> 
> ## Higher derivatives
> deriv3(y ~ b0 + b1 * 2^(-x/th), c("b0", "b1", "th"),
+      c("b0", "b1", "th", "x") )
function (b0, b1, th, x) 
{
    .expr3 <- 2^(-x/th)
    .expr6 <- log(2)
    .expr7 <- th^2
    .expr9 <- .expr6 * (x/.expr7)
    .expr10 <- .expr3 * .expr9
    .value <- b0 + b1 * .expr3
    .grad <- array(0, c(length(.value), 3L), list(NULL, c("b0", 
        "b1", "th")))
    .hessian <- array(0, c(length(.value), 3L, 3L), list(NULL, 
        c("b0", "b1", "th"), c("b0", "b1", "th")))
    .grad[, "b0"] <- 1
    .grad[, "b1"] <- .expr3
    .hessian[, "b1", "b1"] <- 0
    .hessian[, "b1", "th"] <- .hessian[, "th", "b1"] <- .expr10
    .grad[, "th"] <- b1 * .expr10
    .hessian[, "th", "th"] <- b1 * (.expr10 * .expr9 - .expr3 * 
        (.expr6 * (x * (2 * th)/.expr7^2)))
    attr(.value, "gradient") <- .grad
    attr(.value, "hessian") <- .hessian
    .value
}
> 
> ## Higher derivatives:
> DD <- function(expr, name, order = 1) {
+    if(order < 1) stop("'order' must be >= 1")
+    if(order == 1) D(expr, name)
+    else DD(D(expr, name), name, order - 1)
+ }
> DD(expression(sin(x^2)), "x", 3)
-(sin(x^2) * (2 * x) * 2 + ((cos(x^2) * (2 * x) * (2 * x) + sin(x^2) * 
    2) * (2 * x) + sin(x^2) * (2 * x) * 2))
> ## showing the limits of the internal "simplify()" :
> ## Not run: 
> ##D -sin(x^2) * (2 * x) * 2 + ((cos(x^2) * (2 * x) * (2 * x) + sin(x^2) *
> ##D     2) * (2 * x) + sin(x^2) * (2 * x) * 2)
> ## End(Not run)
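> ## consistency check (sketch): order 2 is just D() applied twice :
> stopifnot(identical(DD(expression(sin(x^2)), "x", 2),
+                     D(D(expression(sin(x^2)), "x"), "x")))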
> 
> ## New (R 3.4.0, 2017):
> D(quote(log1p(x^2)), "x") ## log1p(x) = log(1 + x)
2 * x/(1 + x^2)
> stopifnot(identical(
+        D(quote(log1p(x^2)), "x"),
+        D(quote(log(1+x^2)), "x")))
> D(quote(expm1(x^2)), "x") ## expm1(x) = exp(x) - 1
exp(x^2) * (2 * x)
> stopifnot(identical(
+        D(quote(expm1(x^2)), "x") -> Dex1,
+        D(quote(exp(x^2)-1), "x")),
+        identical(Dex1, quote(exp(x^2) * (2 * x))))
> 
> D(quote(sinpi(x^2)), "x") ## sinpi(x) = sin(pi*x)
cospi(x^2) * (pi * (2 * x))
> D(quote(cospi(x^2)), "x") ## cospi(x) = cos(pi*x)
-(sinpi(x^2) * (pi * (2 * x)))
> D(quote(tanpi(x^2)), "x") ## tanpi(x) = tan(pi*x)
pi * (2 * x)/cospi(x^2)^2
> 
> stopifnot(identical(D(quote(log2 (x^2)), "x"),
+                     quote(2 * x/(x^2 * log(2)))),
+           identical(D(quote(log10(x^2)), "x"),
+                     quote(2 * x/(x^2 * log(10)))))
> 
> 
> 
> 
> cleanEx()
> nameEx("diffinv")
> ### * diffinv
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: diffinv
> ### Title: Discrete Integration: Inverse of Differencing
> ### Aliases: diffinv diffinv.default diffinv.ts
> ### Keywords: ts
> 
> ### ** Examples
> 
> s <- 1:10
> d <- diff(s)
> diffinv(d, xi = 1)
 [1]  1  2  3  4  5  6  7  8  9 10
> 
> 
> 
> cleanEx()
> nameEx("dist")
> ### * dist
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: dist
> ### Title: Distance Matrix Computation
> ### Aliases: dist print.dist format.dist labels.dist as.matrix.dist as.dist
> ###   as.dist.default
> ### Keywords: multivariate cluster
> 
> ### ** Examples
> 
> require(graphics)
> 
> x <- matrix(rnorm(100), nrow = 5)
> dist(x)
         1        2        3        4
2 5.701817                           
3 6.013119 5.032069                  
4 7.276905 5.325473 5.811861         
5 6.619295 5.306750 4.945987 6.612081
> dist(x, diag = TRUE)
         1        2        3        4        5
1 0.000000                                    
2 5.701817 0.000000                           
3 6.013119 5.032069 0.000000                  
4 7.276905 5.325473 5.811861 0.000000         
5 6.619295 5.306750 4.945987 6.612081 0.000000
> dist(x, upper = TRUE)
         1        2        3        4        5
1          5.701817 6.013119 7.276905 6.619295
2 5.701817          5.032069 5.325473 5.306750
3 6.013119 5.032069          5.811861 4.945987
4 7.276905 5.325473 5.811861          6.612081
5 6.619295 5.306750 4.945987 6.612081         
> m <- as.matrix(dist(x))
> d <- as.dist(m)
> stopifnot(d == dist(x))
> 
> ## Use correlations between variables "as distance"
> dd <- as.dist((1 - cor(USJudgeRatings))/2)
> round(1000 * dd) # (prints more nicely)
     CONT INTG DMNR DILG CFMG DECI PREP FAMI ORAL WRIT PHYS
INTG  567                                                  
DMNR  577   18                                             
DILG  494   64   82                                        
CFMG  432   93   93   21                                   
DECI  457   99   98   22    9                              
PREP  494   61   72   11   21   21                         
FAMI  513   66   79   21   32   29    5                    
ORAL  506   44   47   23   25   26    8    9               
WRIT  522   46   53   20   29   27    7    5    3          
PHYS  473  129  106   94   60   64   76   78   54   72     
RTEN  517   31   28   35   36   38   25   29    9   16   47
> plot(hclust(dd)) # to see a dendrogram of clustered variables
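> ## sanity check (sketch): (1 - cor)/2 maps correlations in [-1, 1]
> ## to distances in [0, 1] :
> stopifnot(all(dd >= 0), all(dd <= 1))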
> 
> ## example of binary and canberra distances.
> x <- c(0, 0, 1, 1, 1, 1)
> y <- c(1, 0, 1, 1, 0, 1)
> dist(rbind(x, y), method = "binary")
    x
y 0.4
> ## answer 0.4 = 2/5
> dist(rbind(x, y), method = "canberra")
    x
y 2.4
> ## answer 2 * (6/5)
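> ## by hand (sketch): "binary" is the share of disagreeing columns among
> ## the 5 columns that are not both zero; "canberra" sums |x-y|/(|x|+|y|)
> ## over those columns, rescaled by 6/5 for the omitted 0/0 column :
> ok <- !(x == 0 & y == 0)
> stopifnot(all.equal(sum(x[ok] != y[ok]) / sum(ok), 0.4),
+           all.equal(sum((abs(x - y) / (abs(x) + abs(y)))[ok]) * 6/5, 2.4))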
> 
> ## To find the names
> labels(eurodist)
 [1] "Athens"          "Barcelona"       "Brussels"        "Calais"         
 [5] "Cherbourg"       "Cologne"         "Copenhagen"      "Geneva"         
 [9] "Gibraltar"       "Hamburg"         "Hook of Holland" "Lisbon"         
[13] "Lyons"           "Madrid"          "Marseilles"      "Milan"          
[17] "Munich"          "Paris"           "Rome"            "Stockholm"      
[21] "Vienna"         
> 
> ## Examples involving "Inf" :
> ## 1)
> x[6] <- Inf
> (m2 <- rbind(x, y))
  [,1] [,2] [,3] [,4] [,5] [,6]
x    0    0    1    1    1  Inf
y    1    0    1    1    0    1
> dist(m2, method = "binary")   # warning, answer 0.5 = 2/4
Warning in dist(m2, method = "binary") :
  treating non-finite values as NA
    x
y 0.5
> ## These all give "Inf":
> stopifnot(Inf == dist(m2, method =  "euclidean"),
+           Inf == dist(m2, method =  "maximum"),
+           Inf == dist(m2, method =  "manhattan"))
> ##  "Inf" is same as very large number:
> x1 <- x; x1[6] <- 1e100
> stopifnot(dist(cbind(x, y), method = "canberra") ==
+     print(dist(cbind(x1, y), method = "canberra")))
  1 2 3 4 5
2 2        
3 1 2      
4 1 2 0    
5 2 2 1 1  
6 1 2 1 1 2
> 
> ## 2)
> y[6] <- Inf #-> 6-th pair is excluded
> dist(rbind(x, y), method = "binary"  )   # warning; 0.5
Warning in dist(rbind(x, y), method = "binary") :
  treating non-finite values as NA
    x
y 0.5
> dist(rbind(x, y), method = "canberra"  ) # 3
  x
y 3
> dist(rbind(x, y), method = "maximum")    # 1
  x
y 1
> dist(rbind(x, y), method = "manhattan")  # 2.4
    x
y 2.4
> 
> 
> 
> cleanEx()
> nameEx("dummy.coef")
> ### * dummy.coef
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: dummy.coef
> ### Title: Extract Coefficients in Original Coding
> ### Aliases: dummy.coef dummy.coef.lm dummy.coef.aovlist
> ### Keywords: models
> 
> ### ** Examples
> 
> options(contrasts = c("contr.helmert", "contr.poly"))
> ## From Venables and Ripley (2002) p.165.
> npk.aov <- aov(yield ~ block + N*P*K, npk)
> dummy.coef(npk.aov)
Full coefficients are 
                                                                               
(Intercept):        54.875                                                     
block:                   1          2          3          4      5      6      
                    -0.850      2.575      5.900     -4.750 -4.350  1.475      
N:                       0          1                                          
                 -2.808333   2.808333                                          
P:                       0          1                                          
                 0.5916667 -0.5916667                                          
K:                       0          1                                          
                  1.991667  -1.991667                                          
N:P:                   0:0        1:0        0:1        1:1                    
                -0.9416667  0.9416667  0.9416667 -0.9416667                    
N:K:                   0:0        1:0        0:1        1:1                    
                    -1.175      1.175      1.175     -1.175                    
P:K:                   0:0        1:0        0:1        1:1                    
                 0.1416667 -0.1416667 -0.1416667  0.1416667                    
N:P:K:               0:0:0      1:0:0      0:1:0      1:1:0  0:0:1  1:0:1 0:1:1
                         0          0          0          0      0      0     0
                     
(Intercept):         
block:               
                     
N:                   
                     
P:                   
                     
K:                   
                     
N:P:                 
                     
N:K:                 
                     
P:K:                 
                     
N:P:K:          1:1:1
                    0
> 
> npk.aovE <- aov(yield ~  N*P*K + Error(block), npk)
> dummy.coef(npk.aovE)

     Error: (Intercept) 
                      
(Intercept):    54.875

     Error: block 
                                                                               
N:P:K:        0:0:0     1:0:0     0:1:0     1:1:0     0:0:1     1:0:1     0:1:1
          -1.241667  1.241667  1.241667 -1.241667  1.241667 -1.241667 -1.241667
                   
N:P:K:        1:1:1
           1.241667

     Error: Within 
                                                   
N:               0          1                      
         -2.808333   2.808333                      
P:               0          1                      
         0.5916667 -0.5916667                      
K:               0          1                      
          1.991667  -1.991667                      
N:P:           0:0        1:0        0:1        1:1
        -0.9416667  0.9416667  0.9416667 -0.9416667
N:K:           0:0        1:0        0:1        1:1
            -1.175      1.175      1.175     -1.175
P:K:           0:0        1:0        0:1        1:1
         0.1416667 -0.1416667 -0.1416667  0.1416667
> 
> 
> 
> base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly"))
> cleanEx()
> nameEx("ecdf")
> ### * ecdf
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ecdf
> ### Title: Empirical Cumulative Distribution Function
> ### Aliases: ecdf plot.ecdf print.ecdf summary.ecdf quantile.ecdf
> ### Keywords: dplot hplot
> 
> ### ** Examples
> 
> ##-- Simple didactical  ecdf  example :
> x <- rnorm(12)
> Fn <- ecdf(x)
> Fn     # a *function*
Empirical CDF 
Call: ecdf(x)
 x[1:12] = -0.83563, -0.82047, -0.62645,  ..., 1.5118, 1.5953
> Fn(x)  # returns the percentiles for x
 [1] 0.25000000 0.41666667 0.08333333 1.00000000 0.50000000 0.16666667
 [7] 0.66666667 0.83333333 0.75000000 0.33333333 0.91666667 0.58333333
> tt <- seq(-2, 2, by = 0.1)
> 12 * Fn(tt) # Fn is a 'simple' function {with values k/12}
 [1]  0  0  0  0  0  0  0  0  0  0  0  0  2  2  3  3  3  4  4  4  4  4  5  5  7
[26]  8  9  9 10 10 10 10 10 10 10 10 12 12 12 12 12
> summary(Fn)
Empirical CDF:	  12 unique values with summary
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
-0.8356 -0.3857  0.3597  0.2686  0.6164  1.5953 
> ##--> see below for graphics
> knots(Fn)  # the unique data values {12 of them if there were no ties}
 [1] -0.8356286 -0.8204684 -0.6264538 -0.3053884  0.1836433  0.3295078
 [7]  0.3898432  0.4874291  0.5757814  0.7383247  1.5117812  1.5952808
> 
> y <- round(rnorm(12), 1); y[3] <- y[1]
> Fn12 <- ecdf(y)
> Fn12
Empirical CDF 
Call: ecdf(y)
 x[1:8] =   -2.2,     -2,   -0.6,  ...,    0.8,    0.9
> knots(Fn12) # unique values (always less than 12!)
[1] -2.2 -2.0 -0.6  0.0  0.1  0.6  0.8  0.9
> summary(Fn12)
Empirical CDF:	  8 unique values with summary
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
  -2.20   -0.95    0.05   -0.30    0.65    0.90 
> summary.stepfun(Fn12)
Step function with continuity 'f'= 0 ,  8 knots with summary
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
  -2.20   -0.95    0.05   -0.30    0.65    0.90 

and	9 plateau levels (y) with summary
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
 0.0000  0.1667  0.5000  0.4630  0.6667  1.0000 
> 
> ## Advanced: What's inside the function closure?
> ls(environment(Fn12))
[1] "f"      "method" "na.rm"  "nobs"   "x"      "y"      "yleft"  "yright"
> ## "f"     "method" "na.rm"  "nobs"   "x"     "y"    "yleft"  "yright"
> utils::ls.str(environment(Fn12))
f :  num 0
method :  int 2
na.rm :  logi TRUE
nobs :  int 12
x :  num [1:8] -2.2 -2 -0.6 0 0.1 0.6 0.8 0.9
y :  num [1:8] 0.0833 0.1667 0.3333 0.5 0.5833 ...
yleft :  num 0
yright :  num 1
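> ## the closure's 'y' stores the cumulative proportions at the knots, so
> ## evaluating Fn12 there returns exactly those values (a sketch) :
> stopifnot(all.equal(Fn12(knots(Fn12)), environment(Fn12)$y))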
> stopifnot(all.equal(quantile(Fn12), quantile(y)))
> 
> ###----------------- Plotting --------------------------
> require(graphics)
> 
> op <- par(mfrow = c(3, 1), mgp = c(1.5, 0.8, 0), mar =  .1+c(3,3,2,1))
> 
> F10 <- ecdf(rnorm(10))
> summary(F10)
Empirical CDF:	  10 unique values with summary
    Min.  1st Qu.   Median     Mean  3rd Qu.     Max. 
-1.47075 -0.14254 -0.05497  0.04667  0.41037  1.35868 
> 
> plot(F10)
> plot(F10, verticals = TRUE, do.points = FALSE)
> 
> plot(Fn12 , lwd = 2) ; mtext("lwd = 2", adj = 1)
> xx <- unique(sort(c(seq(-3, 2, length.out = 201), knots(Fn12))))
> lines(xx, Fn12(xx), col = "blue")
> abline(v = knots(Fn12), lty = 2, col = "gray70")
> 
> plot(xx, Fn12(xx), type = "o", cex = .1)  #- plot.default {ugly}
> plot(Fn12, col.hor = "red", add =  TRUE)  #- plot method
> abline(v = knots(Fn12), lty = 2, col = "gray70")
> ## luxury plot
> plot(Fn12, verticals = TRUE, col.points = "blue",
+      col.hor = "red", col.vert = "bisque")
> 
> ##-- this works too (automatic call to  ecdf(.)):
> plot.ecdf(rnorm(24))
> title("via  simple  plot.ecdf(x)", adj = 1)
> 
> par(op)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("eff.aovlist")
> ### * eff.aovlist
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: eff.aovlist
> ### Title: Compute Efficiencies of Multistratum Analysis of Variance
> ### Aliases: eff.aovlist
> ### Keywords: models
> 
> ### ** Examples
> 
> ## An example from Yates (1932),
> ## a 2^3 design in 2 blocks replicated 4 times
> 
> Block <- gl(8, 4)
> A <- factor(c(0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
+               0,1,0,1,0,1,0,1,0,1,0,1))
> B <- factor(c(0,0,1,1,0,0,1,1,0,1,0,1,1,0,1,0,0,0,1,1,
+               0,0,1,1,0,0,1,1,0,0,1,1))
> C <- factor(c(0,1,1,0,1,0,0,1,0,0,1,1,0,0,1,1,0,1,0,1,
+               1,0,1,0,0,0,1,1,1,1,0,0))
> Yield <- c(101, 373, 398, 291, 312, 106, 265, 450, 106, 306, 324, 449,
+            272, 89, 407, 338, 87, 324, 279, 471, 323, 128, 423, 334,
+            131, 103, 445, 437, 324, 361, 302, 272)
> aovdat <- data.frame(Block, A, B, C, Yield)
> 
> old <- getOption("contrasts")
> options(contrasts = c("contr.helmert", "contr.poly"))
> ## IGNORE_RDIFF_BEGIN
> (fit <- aov(Yield ~ A*B*C + Error(Block), data = aovdat))

Call:
aov(formula = Yield ~ A * B * C + Error(Block), data = aovdat)

Grand Mean: 291.5938

Stratum 1: Block

Terms:
                      A:B       A:C       B:C     A:B:C Residuals
Sum of Squares   780.1250  276.1250 2556.1250  112.5000  774.0938
Deg. of Freedom         1         1         1         1         3

Residual standard error: 16.06335
Estimated effects are balanced

Stratum 2: Within

Terms:
                        A         B         C       A:B       A:C       B:C
Sum of Squares    3465.28 161170.03 278817.78     28.17   1802.67  11528.17
Deg. of Freedom         1         1         1         1         1         1
                    A:B:C Residuals
Sum of Squares      45.37   5423.28
Deg. of Freedom         1        17

Residual standard error: 17.86103
Estimated effects are balanced
> ## IGNORE_RDIFF_END
> eff.aovlist(fit)
       A B C  A:B  A:C  B:C A:B:C
Block  0 0 0 0.25 0.25 0.25  0.25
Within 1 1 1 0.75 0.75 0.75  0.75
> options(contrasts = old)
> 
> 
> 
> base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly"))
> cleanEx()
> nameEx("effects")
> ### * effects
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: effects
> ### Title: Effects from Fitted Model
> ### Aliases: effects effects.lm effects.glm
> ### Keywords: models regression
> 
> ### ** Examples
> 
> y <- c(1:3, 7, 5)
> x <- c(1:3, 6:7)
> ( ee <- effects(lm(y ~ x)) )
(Intercept)           x                                     
 -8.0498447   4.3655709   0.1483334   1.6144112  -1.2302295 
attr(,"assign")
[1] 0 1
attr(,"class")
[1] "coef"
> c( round(ee - effects(lm(y+10 ~ I(x-3.8))), 3) )
(Intercept)           x                                     
     22.361       0.000       0.000       0.000       0.000 
> # just the first is different
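> ## why (sketch): effects are Q'y for the orthonormal Q of the QR
> ## decomposition, so the squared norm of the response is preserved :
> stopifnot(all.equal(sum(ee^2), sum(y^2)))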
> 
> 
> 
> cleanEx()
> nameEx("embed")
> ### * embed
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: embed
> ### Title: Embedding a Time Series
> ### Aliases: embed
> ### Keywords: ts
> 
> ### ** Examples
> 
> x <- 1:10
> embed (x, 3)
     [,1] [,2] [,3]
[1,]    3    2    1
[2,]    4    3    2
[3,]    5    4    3
[4,]    6    5    4
[5,]    7    6    5
[6,]    8    7    6
[7,]    9    8    7
[8,]   10    9    8
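> ## the pattern (sketch): row i of embed(x, d) is x[(i+d-1):i], i.e.
> ## the most recent value first :
> e <- embed(x, 3)
> stopifnot(identical(e[4, ], x[6:4]))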
> 
> 
> 
> cleanEx()
> nameEx("expand.model.frame")
> ### * expand.model.frame
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: expand.model.frame
> ### Title: Add new variables to a model frame
> ### Aliases: expand.model.frame
> ### Keywords: manip regression
> 
> ### ** Examples
> 
> model <- lm(log(Volume) ~ log(Girth) + log(Height), data = trees)
> expand.model.frame(model, ~ Girth) # prints like a data frame
   log(Volume) log(Girth) log(Height) Girth
1     2.332144   2.116256    4.248495   8.3
2     2.332144   2.151762    4.174387   8.6
3     2.322388   2.174752    4.143135   8.8
4     2.797281   2.351375    4.276666  10.5
5     2.933857   2.370244    4.394449  10.7
6     2.980619   2.379546    4.418841  10.8
7     2.747271   2.397895    4.189655  11.0
8     2.901422   2.397895    4.317488  11.0
9     3.117950   2.406945    4.382027  11.1
10    2.990720   2.415914    4.317488  11.2
11    3.186353   2.424803    4.369448  11.3
12    3.044522   2.433613    4.330733  11.4
13    3.063391   2.433613    4.330733  11.4
14    3.058707   2.459589    4.234107  11.7
15    2.949688   2.484907    4.317488  12.0
16    3.100092   2.557227    4.304065  12.9
17    3.520461   2.557227    4.442651  12.9
18    3.310543   2.587764    4.454347  13.3
19    3.246491   2.617396    4.262680  13.7
20    3.214868   2.624669    4.158883  13.8
21    3.540959   2.639057    4.356709  14.0
22    3.456317   2.653242    4.382027  14.2
23    3.591818   2.674149    4.304065  14.5
24    3.645450   2.772589    4.276666  16.0
25    3.751854   2.791165    4.343805  16.3
26    4.014580   2.850707    4.394449  17.3
27    4.019980   2.862201    4.406719  17.5
28    4.065602   2.884801    4.382027  17.9
29    3.941582   2.890372    4.382027  18.0
30    3.931826   2.890372    4.382027  18.0
31    4.343805   3.025291    4.465908  20.6
> 
> dd <- data.frame(x = 1:5, y = rnorm(5), z = c(1,2,NA,4,5))
> model <- glm(y ~ x, data = dd, subset = 1:4, na.action = na.omit)
> expand.model.frame(model, "z", na.expand = FALSE) # = default
           y x z
1 -0.6264538 1 1
2  0.1836433 2 2
4  1.5952808 4 4
> expand.model.frame(model, "z", na.expand = TRUE)
           y x  z
1 -0.6264538 1  1
2  0.1836433 2  2
3 -0.8356286 3 NA
4  1.5952808 4  4
> 
> 
> 
> cleanEx()
> nameEx("extractAIC")
> ### * extractAIC
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: extractAIC
> ### Title: Extract AIC from a Fitted Model
> ### Aliases: extractAIC
> ### Keywords: models
> 
> ### ** Examples
> 
> 
> cleanEx()
> nameEx("factanal")
> ### * factanal
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: factanal
> ### Title: Factor Analysis
> ### Aliases: factanal
> ### Keywords: multivariate
> 
> ### ** Examples
> 
> # A little demonstration, v2 is just v1 with noise,
> # and same for v4 vs. v3 and v6 vs. v5
> # Last four cases are there to add noise
> # and introduce a positive manifold (g factor)
> v1 <- c(1,1,1,1,1,1,1,1,1,1,3,3,3,3,3,4,5,6)
> v2 <- c(1,2,1,1,1,1,2,1,2,1,3,4,3,3,3,4,6,5)
> v3 <- c(3,3,3,3,3,1,1,1,1,1,1,1,1,1,1,5,4,6)
> v4 <- c(3,3,4,3,3,1,1,2,1,1,1,1,2,1,1,5,6,4)
> v5 <- c(1,1,1,1,1,3,3,3,3,3,1,1,1,1,1,6,4,5)
> v6 <- c(1,1,1,2,1,3,3,3,4,3,1,1,1,2,1,6,5,4)
> m1 <- cbind(v1,v2,v3,v4,v5,v6)
> cor(m1)
          v1        v2        v3        v4        v5        v6
v1 1.0000000 0.9393083 0.5128866 0.4320310 0.4664948 0.4086076
v2 0.9393083 1.0000000 0.4124441 0.4084281 0.4363925 0.4326113
v3 0.5128866 0.4124441 1.0000000 0.8770750 0.5128866 0.4320310
v4 0.4320310 0.4084281 0.8770750 1.0000000 0.4320310 0.4323259
v5 0.4664948 0.4363925 0.5128866 0.4320310 1.0000000 0.9473451
v6 0.4086076 0.4326113 0.4320310 0.4323259 0.9473451 1.0000000
> factanal(m1, factors = 3) # varimax is the default

Call:
factanal(x = m1, factors = 3)

Uniquenesses:
   v1    v2    v3    v4    v5    v6 
0.005 0.101 0.005 0.224 0.084 0.005 

Loadings:
   Factor1 Factor2 Factor3
v1 0.944   0.182   0.267  
v2 0.905   0.235   0.159  
v3 0.236   0.210   0.946  
v4 0.180   0.242   0.828  
v5 0.242   0.881   0.286  
v6 0.193   0.959   0.196  

               Factor1 Factor2 Factor3
SS loadings      1.893   1.886   1.797
Proportion Var   0.316   0.314   0.300
Cumulative Var   0.316   0.630   0.929

The degrees of freedom for the model is 0 and the fit was 0.4755 
> # (A principal component analysis of m1 would show the g factor as PC1.)
> 
> ## formula interface
> factanal(~v1+v2+v3+v4+v5+v6, factors = 3,
+          scores = "Bartlett")$scores
      Factor1    Factor2    Factor3
1  -0.9039949 -0.9308984  0.9475392
2  -0.8685952 -0.9328721  0.9352330
3  -0.9082818 -0.9320093  0.9616422
4  -1.0021975 -0.2529689  0.8178552
5  -0.9039949 -0.9308984  0.9475392
6  -0.7452711  0.7273960 -0.7884733
7  -0.7098714  0.7254223 -0.8007795
8  -0.7495580  0.7262851 -0.7743704
9  -0.8080740  1.4033517 -0.9304636
10 -0.7452711  0.7273960 -0.7884733
11  0.9272282 -0.9307506 -0.8371538
12  0.9626279 -0.9327243 -0.8494600
13  0.9229413 -0.9318615 -0.8230509
14  0.8290256 -0.2528211 -0.9668378
15  0.9272282 -0.9307506 -0.8371538
16  0.4224366  2.0453079  1.2864761
17  1.4713902  1.2947716  0.5451562
18  1.8822320  0.3086244  1.9547752
> 
> 
> 
> cleanEx()
> nameEx("factor.scope")
> ### * factor.scope
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: factor.scope
> ### Title: Compute Allowed Changes in Adding to or Dropping from a Formula
> ### Aliases: add.scope drop.scope factor.scope
> ### Keywords: models
> 
> ### ** Examples
> 
> add.scope( ~ a + b + c + a:b,  ~ (a + b + c)^3)
[1] "a:c" "b:c"
> # [1] "a:c" "b:c"
> drop.scope( ~ a + b + c + a:b)
[1] "c"   "a:b"
> # [1] "c"   "a:b"
> 
> 
> 
> cleanEx()
> nameEx("family")
> ### * family
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: family
> ### Title: Family Objects for Models
> ### Aliases: family binomial gaussian Gamma inverse.gaussian poisson quasi
> ###   quasibinomial quasipoisson
> ### Keywords: models
> 
> ### ** Examples
> 
> require(utils) # for str
> 
> nf <- gaussian()  # Normal family
> nf

Family: gaussian 
Link function: identity 

> str(nf)
List of 12
 $ family    : chr "gaussian"
 $ link      : chr "identity"
 $ linkfun   :function (mu)  
 $ linkinv   :function (eta)  
 $ variance  :function (mu)  
 $ dev.resids:function (y, mu, wt)  
 $ aic       :function (y, n, mu, wt, dev)  
 $ mu.eta    :function (eta)  
 $ initialize:  expression({  n <- rep.int(1, nobs)  if (is.null(etastart) && is.null(start) && is.null(mustart) && ((family$link| __truncated__
 $ validmu   :function (mu)  
 $ valideta  :function (eta)  
 $ dispersion: num NA
 - attr(*, "class")= chr "family"
> 
> gf <- Gamma()
> gf

Family: Gamma 
Link function: inverse 

> str(gf)
List of 13
 $ family    : chr "Gamma"
 $ link      : chr "inverse"
 $ linkfun   :function (mu)  
 $ linkinv   :function (eta)  
 $ variance  :function (mu)  
 $ dev.resids:function (y, mu, wt)  
 $ aic       :function (y, n, mu, wt, dev)  
 $ mu.eta    :function (eta)  
 $ initialize:  expression({  if (any(y <= 0))  stop("non-positive values not allowed for the 'Gamma' family")  n <- rep.int(1, n| __truncated__
 $ validmu   :function (mu)  
 $ valideta  :function (eta)  
 $ simulate  :function (object, nsim)  
 $ dispersion: num NA
 - attr(*, "class")= chr "family"
> gf$linkinv
function (eta) 
1/eta
<environment: namespace:stats>
> gf$variance(-3:4) #- == (.)^2
[1]  9  4  1  0  1  4  9 16
> 
> ## Binomial with default 'logit' link:  Check some properties visually:
> bi <- binomial()
> et <- seq(-10,10, by=1/8)
> plot(et, bi$mu.eta(et), type="l")
> ## show that mu.eta() is derivative of linkinv() :
> lines((et[-1]+et[-length(et)])/2, col=adjustcolor("red", 1/4),
+       diff(bi$linkinv(et))/diff(et), type="l", lwd=4)
> ## which here is the logistic density:
> lines(et, dlogis(et), lwd=3, col=adjustcolor("blue", 1/4))
> stopifnot(exprs = {
+   all.equal(bi$ mu.eta(et), dlogis(et))
+   all.equal(bi$linkinv(et), plogis(et) -> m)
+   all.equal(bi$linkfun(m ), qlogis(m))    #  logit(.) == qlogis(.) !
+ })
> 
> ## Data from example(glm) :
> d.AD <- data.frame(treatment = gl(3,3),
+                    outcome   = gl(3,1,9),
+                    counts    = c(18,17,15, 20,10,20, 25,13,12))
> glm.D93 <- glm(counts ~ outcome + treatment, d.AD, family = poisson())
> ## Quasipoisson: compare with above / example(glm) :
> glm.qD93 <- glm(counts ~ outcome + treatment, d.AD, family = quasipoisson())
> 
> 
> ## Example of user-specified link, a logit model for p^days
> ## See Shaffer, T.  2004. Auk 121(2): 526-540.
> logexp <- function(days = 1)
+ {
+     linkfun <- function(mu) qlogis(mu^(1/days))
+     linkinv <- function(eta) plogis(eta)^days
+     mu.eta  <- function(eta) days * plogis(eta)^(days-1) *
+                   binomial()$mu.eta(eta)
+     valideta <- function(eta) TRUE
+     link <- paste0("logexp(", days, ")")
+     structure(list(linkfun = linkfun, linkinv = linkinv,
+                    mu.eta = mu.eta, valideta = valideta, name = link),
+               class = "link-glm")
+ }
> (bil3 <- binomial(logexp(3)))

Family: binomial 
Link function: logexp(3) 

> ## Don't show: 
> stopifnot(length(bil3$mu.eta(as.double(0:5))) == 6)
> ## End(Don't show)
> ## in practice this would be used with a vector of 'days', in
> ## which case use an offset of 0 in the corresponding formula
> ## to get the null deviance right.
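> ## usage sketch (hypothetical names, nothing here is run): with a vector
> ## of exposures one would fit, e.g.,
> ##   glm(surv ~ cov + offset(rep(0, length(surv))),
> ##       family = binomial(logexp(days = expos)), data = d)
> ## where 'surv', 'cov', 'expos' and 'd' are placeholders.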
> 
> ## Binomial with identity link: often not a good idea, as it is both
> ## computationally and conceptually difficult:
> binomial(link = "identity")  ## is exactly the same as

Family: binomial 
Link function: identity 

> binomial(link = make.link("identity"))

Family: binomial 
Link function: identity 

> 
> 
> 
> ## tests of quasi
> x <- rnorm(100)
> y <- rpois(100, exp(1+x))
> glm(y ~ x, family = quasi(variance = "mu", link = "log"))

Call:  glm(formula = y ~ x, family = quasi(variance = "mu", link = "log"))

Coefficients:
(Intercept)            x  
     0.8596       1.0875  

Degrees of Freedom: 99 Total (i.e. Null);  98 Residual
Null Deviance:	    481.9 
Residual Deviance: 101.7 	AIC: NA
> # which is the same as
> glm(y ~ x, family = poisson)

Call:  glm(formula = y ~ x, family = poisson)

Coefficients:
(Intercept)            x  
     0.8596       1.0875  

Degrees of Freedom: 99 Total (i.e. Null);  98 Residual
Null Deviance:	    481.9 
Residual Deviance: 101.7 	AIC: 364
> glm(y ~ x, family = quasi(variance = "mu^2", link = "log"))

Call:  glm(formula = y ~ x, family = quasi(variance = "mu^2", link = "log"))

Coefficients:
(Intercept)            x  
     0.6902       1.4546  

Degrees of Freedom: 99 Total (i.e. Null);  98 Residual
Null Deviance:	    83.85 
Residual Deviance: 32.45 	AIC: NA
> ## Not run: glm(y ~ x, family = quasi(variance = "mu^3", link = "log")) # fails
> y <- rbinom(100, 1, plogis(x))
> # need to set a starting value for the next fit
> glm(y ~ x, family = quasi(variance = "mu(1-mu)", link = "logit"), start = c(0,1))

Call:  glm(formula = y ~ x, family = quasi(variance = "mu(1-mu)", link = "logit"), 
    start = c(0, 1))

Coefficients:
(Intercept)            x  
   -0.08334      0.38518  

Degrees of Freedom: 99 Total (i.e. Null);  98 Residual
Null Deviance:	    138.6 
Residual Deviance: 135.8 	AIC: NA
> 
> 
> 
> cleanEx()
> nameEx("fft")
> ### * fft
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: fft
> ### Title: Fast Discrete Fourier Transform (FFT)
> ### Aliases: fft mvfft
> ### Keywords: math dplot
> 
> ### ** Examples
> 
> x <- 1:4
> fft(x)
[1] 10+0i -2+2i -2+0i -2-2i
> fft(fft(x), inverse = TRUE)/length(x)
[1] 1+0i 2+0i 3+0i 4+0i
> 
> ## Slow Discrete Fourier Transform (DFT) - e.g., for checking the formula
> fft0 <- function(z, inverse=FALSE) {
+   n <- length(z)
+   if(n == 0) return(z)
+   k <- 0:(n-1)
+   ff <- (if(inverse) 1 else -1) * 2*pi * 1i * k/n
+   vapply(1:n, function(h) sum(z * exp(ff*(h-1))), complex(1))
+ }
> 
> relD <- function(x,y) 2* abs(x - y) / abs(x + y)
> n <- 2^8
> z <- complex(n, rnorm(n), rnorm(n))
> 
> 
> cleanEx()
> nameEx("filter")
> ### * filter
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: filter
> ### Title: Linear Filtering on a Time Series
> ### Aliases: filter
> ### Keywords: ts
> 
> ### ** Examples
> 
> x <- 1:100
> filter(x, rep(1, 3))
Time Series:
Start = 1 
End = 100 
Frequency = 1 
  [1]  NA   6   9  12  15  18  21  24  27  30  33  36  39  42  45  48  51  54
 [19]  57  60  63  66  69  72  75  78  81  84  87  90  93  96  99 102 105 108
 [37] 111 114 117 120 123 126 129 132 135 138 141 144 147 150 153 156 159 162
 [55] 165 168 171 174 177 180 183 186 189 192 195 198 201 204 207 210 213 216
 [73] 219 222 225 228 231 234 237 240 243 246 249 252 255 258 261 264 267 270
 [91] 273 276 279 282 285 288 291 294 297  NA
> filter(x, rep(1, 3), sides = 1)
Time Series:
Start = 1 
End = 100 
Frequency = 1 
  [1]  NA  NA   6   9  12  15  18  21  24  27  30  33  36  39  42  45  48  51
 [19]  54  57  60  63  66  69  72  75  78  81  84  87  90  93  96  99 102 105
 [37] 108 111 114 117 120 123 126 129 132 135 138 141 144 147 150 153 156 159
 [55] 162 165 168 171 174 177 180 183 186 189 192 195 198 201 204 207 210 213
 [73] 216 219 222 225 228 231 234 237 240 243 246 249 252 255 258 261 264 267
 [91] 270 273 276 279 282 285 288 291 294 297
> filter(x, rep(1, 3), sides = 1, circular = TRUE)
Time Series:
Start = 1 
End = 100 
Frequency = 1 
  [1] 200 103   6   9  12  15  18  21  24  27  30  33  36  39  42  45  48  51
 [19]  54  57  60  63  66  69  72  75  78  81  84  87  90  93  96  99 102 105
 [37] 108 111 114 117 120 123 126 129 132 135 138 141 144 147 150 153 156 159
 [55] 162 165 168 171 174 177 180 183 186 189 192 195 198 201 204 207 210 213
 [73] 216 219 222 225 228 231 234 237 240 243 246 249 252 255 258 261 264 267
 [91] 270 273 276 279 282 285 288 291 294 297
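> ## by hand (sketch): with sides = 1 and circular = TRUE the filter
> ## wraps around the end of the series :
> cf <- filter(x, rep(1, 3), sides = 1, circular = TRUE)
> stopifnot(cf[1] == x[1] + x[100] + x[99],
+           cf[2] == x[2] + x[1] + x[100])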
> 
> filter(presidents, rep(1, 3))
     Qtr1 Qtr2 Qtr3 Qtr4
1945   NA   NA  244  220
1946  188  156  125  110
1947  127  149  169  145
1948  130   NA   NA   NA
1949   NA  183  165  153
1950  133  128  122  121
1951   99   92   79   80
1952   80   NA   NA   NA
1953  165  208  209  206
1954  192  203  189  199
1955  196  218  220  228
1956  220  214  213  221
1957  216  204  182  180
1958  166  157  149  157
1959  171  180  189  198
1960  199  194  180  190
1961  212  226  232  228
1962  228  212  207  212
1963  214  202  183  199
1964  210  222  211  209
1965  204  204  195  194
1966  171  165  146  144
1967  140  134  136  120
1968  131  120  128  138
1969  168  189  186  187
1970  175  180  166  164
1971  151  153  151  152
1972  159   NA   NA   NA
1973   NA  152  111   95
1974   80   77   73   NA
> 
> 
> 
> cleanEx()
> nameEx("fisher.test")
> ### * fisher.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: fisher.test
> ### Title: Fisher's Exact Test for Count Data
> ### Aliases: fisher.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Agresti (1990, p. 61f; 2002, p. 91) Fisher's Tea Drinker
> ## A British woman claimed to be able to distinguish whether milk or
> ##  tea was added to the cup first.  To test, she was given 8 cups of
> ##  tea, in four of which milk was added first.  The null hypothesis
> ##  is that there is no association between the true order of pouring
> ##  and the woman's guess, the alternative that there is a positive
> ##  association (that the odds ratio is greater than 1).
> TeaTasting <-
+ matrix(c(3, 1, 1, 3),
+        nrow = 2,
+        dimnames = list(Guess = c("Milk", "Tea"),
+                        Truth = c("Milk", "Tea")))
> fisher.test(TeaTasting, alternative = "greater")

	Fisher's Exact Test for Count Data

data:  TeaTasting
p-value = 0.2429
alternative hypothesis: true odds ratio is greater than 1
95 percent confidence interval:
 0.3135693       Inf
sample estimates:
odds ratio 
  6.408309 

> ## => p = 0.2429, association could not be established
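> ## by hand (sketch): conditionally on the margins, the first cell is
> ## hypergeometric, so the one-sided p-value is P(X >= 3) = 17/70 :
> stopifnot(all.equal(sum(dhyper(3:4, 4, 4, 4)), 17/70),
+           all.equal(fisher.test(TeaTasting,
+                                 alternative = "greater")$p.value, 17/70))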
> 
> ## Fisher (1962, 1970), Criminal convictions of like-sex twins
> Convictions <- matrix(c(2, 10, 15, 3), nrow = 2,
+ 	              dimnames =
+ 	       list(c("Dizygotic", "Monozygotic"),
+ 		    c("Convicted", "Not convicted")))
> Convictions
            Convicted Not convicted
Dizygotic           2            15
Monozygotic        10             3
> fisher.test(Convictions, alternative = "less")

	Fisher's Exact Test for Count Data

data:  Convictions
p-value = 0.0004652
alternative hypothesis: true odds ratio is less than 1
95 percent confidence interval:
 0.0000000 0.2849601
sample estimates:
odds ratio 
0.04693661 

> fisher.test(Convictions, conf.int = FALSE)

	Fisher's Exact Test for Count Data

data:  Convictions
p-value = 0.0005367
alternative hypothesis: true odds ratio is not equal to 1
sample estimates:
odds ratio 
0.04693661 

> fisher.test(Convictions, conf.level = 0.95)$conf.int
[1] 0.003325764 0.363182271
attr(,"conf.level")
[1] 0.95
> fisher.test(Convictions, conf.level = 0.99)$conf.int
[1] 0.001386333 0.578851645
attr(,"conf.level")
[1] 0.99
> 
> ## A r x c table  Agresti (2002, p. 57) Job Satisfaction
> Job <- matrix(c(1,2,1,0, 3,3,6,1, 10,10,14,9, 6,7,12,11), 4, 4,
+            dimnames = list(income = c("< 15k", "15-25k", "25-40k", "> 40k"),
+                      satisfaction = c("VeryD", "LittleD", "ModerateS", "VeryS")))
> fisher.test(Job) # 0.7827

	Fisher's Exact Test for Count Data

data:  Job
p-value = 0.7827
alternative hypothesis: two.sided

> fisher.test(Job, simulate.p.value = TRUE, B = 1e5) # also close to 0.78

	Fisher's Exact Test for Count Data with simulated p-value (based on
	1e+05 replicates)

data:  Job
p-value = 0.7842
alternative hypothesis: two.sided

> 
> ## 6th example in Mehta & Patel's JASA paper
> MP6 <- rbind(
+         c(1,2,2,1,1,0,1),
+         c(2,0,0,2,3,0,0),
+         c(0,1,1,1,2,7,3),
+         c(1,1,2,0,0,0,1),
+         c(0,1,1,1,1,0,0))
> fisher.test(MP6)

	Fisher's Exact Test for Count Data

data:  MP6
p-value = 0.03929
alternative hypothesis: two.sided

> # Exactly the same p-value, as Cochran's conditions are not met:
> fisher.test(MP6, hybrid = TRUE)

	Fisher's Exact Test for Count Data hybrid using asym.chisq. iff (exp=5,
	perc=80, Emin=1)

data:  MP6
p-value = 0.03929
alternative hypothesis: two.sided

> 
> 
> 
> cleanEx()
> nameEx("fivenum")
> ### * fivenum
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: fivenum
> ### Title: Tukey Five-Number Summaries
> ### Aliases: fivenum
> ### Keywords: univar robust distribution
> 
> ### ** Examples
> 
> fivenum(c(rnorm(100), -1:1/0))
[1]       -Inf -0.5425200  0.1139092  0.6969634        Inf
> 
> 
> 
> cleanEx()
> nameEx("fligner.test")
> ### * fligner.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: fligner.test
> ### Title: Fligner-Killeen Test of Homogeneity of Variances
> ### Aliases: fligner.test fligner.test.default fligner.test.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> require(graphics)
> 
> plot(count ~ spray, data = InsectSprays)
> fligner.test(InsectSprays$count, InsectSprays$spray)

	Fligner-Killeen test of homogeneity of variances

data:  InsectSprays$count and InsectSprays$spray
Fligner-Killeen:med chi-squared = 14.483, df = 5, p-value = 0.01282

> fligner.test(count ~ spray, data = InsectSprays)

	Fligner-Killeen test of homogeneity of variances

data:  count by spray
Fligner-Killeen:med chi-squared = 14.483, df = 5, p-value = 0.01282

> ## Compare this to bartlett.test()
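> ## that comparison would be, e.g. (a sketch, not run here):
> ##   bartlett.test(count ~ spray, data = InsectSprays)
> ## Bartlett's test assumes normality; Fligner-Killeen is more robust
> ## against departures from it.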
> 
> 
> 
> cleanEx()
> nameEx("formula")
> ### * formula
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: formula
> ### Title: Model Formulae
> ### Aliases: formula formula.default formula.formula formula.terms
> ###   formula.data.frame DF2formula as.formula print.formula [.formula
> ### Keywords: models
> 
> ### ** Examples
> 
> class(fo <- y ~ x1*x2) # "formula"
[1] "formula"
> fo
y ~ x1 * x2
> typeof(fo)  # R internal : "language"
[1] "language"
> terms(fo)
y ~ x1 * x2
attr(,"variables")
list(y, x1, x2)
attr(,"factors")
   x1 x2 x1:x2
y   0  0     0
x1  1  0     1
x2  0  1     1
attr(,"term.labels")
[1] "x1"    "x2"    "x1:x2"
attr(,"order")
[1] 1 1 2
attr(,"intercept")
[1] 1
attr(,"response")
[1] 1
attr(,".Environment")
<environment: R_GlobalEnv>
> 
> environment(fo)
<environment: R_GlobalEnv>
> environment(as.formula("y ~ x"))
<environment: R_GlobalEnv>
> environment(as.formula("y ~ x", env = new.env()))
<environment: 0x10699fb68>
> 
> 
> ## Create a formula for a model with a large number of variables:
> xnam <- paste0("x", 1:25)
> (fmla <- as.formula(paste("y ~ ", paste(xnam, collapse= "+"))))
y ~ x1 + x2 + x3 + x4 + x5 + x6 + x7 + x8 + x9 + x10 + x11 + 
    x12 + x13 + x14 + x15 + x16 + x17 + x18 + x19 + x20 + x21 + 
    x22 + x23 + x24 + x25
> ## Equivalent with reformulate():
> fmla2 <- reformulate(xnam, response = "y")
> stopifnot(identical(fmla, fmla2))
> 
> 
> 
> cleanEx()
> nameEx("formula.nls")
> ### * formula.nls
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: formula.nls
> ### Title: Extract Model Formula from 'nls' Object
> ### Aliases: formula.nls
> ### Keywords: models
> 
> ### ** Examples
> 
> fm1 <- nls(circumference ~ A/(1+exp((B-age)/C)), Orange,
+            start = list(A = 160, B = 700, C = 350))
> formula(fm1)
circumference ~ A/(1 + exp((B - age)/C))
> 
> 
> 
> cleanEx()
> nameEx("friedman.test")
> ### * friedman.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: friedman.test
> ### Title: Friedman Rank Sum Test
> ### Aliases: friedman.test friedman.test.default friedman.test.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Hollander & Wolfe (1973), p. 140ff.
> ## Comparison of three methods ("round out", "narrow angle", and
> ##  "wide angle") for rounding first base.  For each of 18 players
> ##  and the three method, the average time of two runs from a point on
> ##  the first base line 35ft from home plate to a point 15ft short of
> ##  second base is recorded.
> RoundingTimes <-
+ matrix(c(5.40, 5.50, 5.55,
+          5.85, 5.70, 5.75,
+          5.20, 5.60, 5.50,
+          5.55, 5.50, 5.40,
+          5.90, 5.85, 5.70,
+          5.45, 5.55, 5.60,
+          5.40, 5.40, 5.35,
+          5.45, 5.50, 5.35,
+          5.25, 5.15, 5.00,
+          5.85, 5.80, 5.70,
+          5.25, 5.20, 5.10,
+          5.65, 5.55, 5.45,
+          5.60, 5.35, 5.45,
+          5.05, 5.00, 4.95,
+          5.50, 5.50, 5.40,
+          5.45, 5.55, 5.50,
+          5.55, 5.55, 5.35,
+          5.45, 5.50, 5.55,
+          5.50, 5.45, 5.25,
+          5.65, 5.60, 5.40,
+          5.70, 5.65, 5.55,
+          6.30, 6.30, 6.25),
+        nrow = 22,
+        byrow = TRUE,
+        dimnames = list(1 : 22,
+                        c("Round Out", "Narrow Angle", "Wide Angle")))
> friedman.test(RoundingTimes)

	Friedman rank sum test

data:  RoundingTimes
Friedman chi-squared = 11.143, df = 2, p-value = 0.003805

> ## => strong evidence against the null that the methods are equivalent
> ##    with respect to speed
> 
> wb <- aggregate(warpbreaks$breaks,
+                 by = list(w = warpbreaks$wool,
+                           t = warpbreaks$tension),
+                 FUN = mean)
> wb
  w t        x
1 A L 44.55556
2 B L 28.22222
3 A M 24.00000
4 B M 28.77778
5 A H 24.55556
6 B H 18.77778
> friedman.test(wb$x, wb$w, wb$t)

	Friedman rank sum test

data:  wb$x, wb$w and wb$t
Friedman chi-squared = 0.33333, df = 1, p-value = 0.5637

> friedman.test(x ~ w | t, data = wb)

	Friedman rank sum test

data:  x and w and t
Friedman chi-squared = 0.33333, df = 1, p-value = 0.5637

> 
> 
> 
> cleanEx()
> nameEx("ftable")
> ### * ftable
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ftable
> ### Title: Flat Contingency Tables
> ### Aliases: ftable ftable.default
> ### Keywords: category
> 
> ### ** Examples
> 
> ## Start with a contingency table.
> ftable(Titanic, row.vars = 1:3)
                   Survived  No Yes
Class Sex    Age                   
1st   Male   Child            0   5
             Adult          118  57
      Female Child            0   1
             Adult            4 140
2nd   Male   Child            0  11
             Adult          154  14
      Female Child            0  13
             Adult           13  80
3rd   Male   Child           35  13
             Adult          387  75
      Female Child           17  14
             Adult           89  76
Crew  Male   Child            0   0
             Adult          670 192
      Female Child            0   0
             Adult            3  20
> ftable(Titanic, row.vars = 1:2, col.vars = "Survived")
             Survived  No Yes
Class Sex                    
1st   Male            118  62
      Female            4 141
2nd   Male            154  25
      Female           13  93
3rd   Male            422  88
      Female          106  90
Crew  Male            670 192
      Female            3  20
> ftable(Titanic, row.vars = 2:1, col.vars = "Survived")
             Survived  No Yes
Sex    Class                 
Male   1st            118  62
       2nd            154  25
       3rd            422  88
       Crew           670 192
Female 1st              4 141
       2nd             13  93
       3rd            106  90
       Crew             3  20
> ## Don't show: 
> . <- integer()
> (f04 <- ftable(Titanic, col.vars= .))
Class Sex    Age   Survived     
1st   Male   Child No          0
                   Yes         5
             Adult No        118
                   Yes        57
      Female Child No          0
                   Yes         1
             Adult No          4
                   Yes       140
2nd   Male   Child No          0
                   Yes        11
             Adult No        154
                   Yes        14
      Female Child No          0
                   Yes        13
             Adult No         13
                   Yes        80
3rd   Male   Child No         35
                   Yes        13
             Adult No        387
                   Yes        75
      Female Child No         17
                   Yes        14
             Adult No         89
                   Yes        76
Crew  Male   Child No          0
                   Yes         0
             Adult No        670
                   Yes       192
      Female Child No          0
                   Yes         0
             Adult No          3
                   Yes        20
> (f10 <- ftable(Titanic, col.vars= 1, row.vars= .))
Class 1st 2nd 3rd Crew
                      
      325 285 706  885
> (f01 <- ftable(Titanic, col.vars= ., row.vars= 1))
Class     
1st    325
2nd    285
3rd    706
Crew   885
> (f00 <- ftable(Titanic, col.vars= ., row.vars= .))
     
 2201
> stopifnot(
+   dim(f04) == c(32,1),
+   dim(f10) == c(1,4),
+   dim(f01) == c(4,1),
+   dim(f00) == c(1,1))
> ## End(Don't show)
> ## Start with a data frame.
> x <- ftable(mtcars[c("cyl", "vs", "am", "gear")])
> x
          gear  3  4  5
cyl vs am              
4   0  0        0  0  0
       1        0  0  1
    1  0        1  2  0
       1        0  6  1
6   0  0        0  0  0
       1        0  2  1
    1  0        2  2  0
       1        0  0  0
8   0  0       12  0  0
       1        0  0  2
    1  0        0  0  0
       1        0  0  0
> ftable(x, row.vars = c(2, 4))
        cyl  4     6     8   
        am   0  1  0  1  0  1
vs gear                      
0  3         0  0  0  0 12  0
   4         0  0  0  2  0  0
   5         0  1  0  1  0  2
1  3         1  0  2  0  0  0
   4         2  6  2  0  0  0
   5         0  1  0  0  0  0
> 
> ## Start with expressions, use table()'s "dnn" to change labels
> ftable(mtcars$cyl, mtcars$vs, mtcars$am, mtcars$gear, row.vars = c(2, 4),
+        dnn = c("Cylinders", "V/S", "Transmission", "Gears"))
          Cylinders     4     6     8   
          Transmission  0  1  0  1  0  1
V/S Gears                               
0   3                   0  0  0  0 12  0
    4                   0  0  0  2  0  0
    5                   0  1  0  1  0  2
1   3                   1  0  2  0  0  0
    4                   2  6  2  0  0  0
    5                   0  1  0  0  0  0
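## row.vars and col.vars also accept variable *names* instead of indices;
## a small sketch reproducing the row.vars = 1:2 call further above:
ftable(Titanic, row.vars = c("Class", "Sex"), col.vars = "Survived")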
> 
> 
> 
> cleanEx()
> nameEx("ftable.formula")
> ### * ftable.formula
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ftable.formula
> ### Title: Formula Notation for Flat Contingency Tables
> ### Aliases: ftable.formula
> ### Keywords: category
> 
> ### ** Examples
> 
> Titanic
, , Age = Child, Survived = No

      Sex
Class  Male Female
  1st     0      0
  2nd     0      0
  3rd    35     17
  Crew    0      0

, , Age = Adult, Survived = No

      Sex
Class  Male Female
  1st   118      4
  2nd   154     13
  3rd   387     89
  Crew  670      3

, , Age = Child, Survived = Yes

      Sex
Class  Male Female
  1st     5      1
  2nd    11     13
  3rd    13     14
  Crew    0      0

, , Age = Adult, Survived = Yes

      Sex
Class  Male Female
  1st    57    140
  2nd    14     80
  3rd    75     76
  Crew  192     20

> x <- ftable(Survived ~ ., data = Titanic)
> x
                   Survived  No Yes
Class Sex    Age                   
1st   Male   Child            0   5
             Adult          118  57
      Female Child            0   1
             Adult            4 140
2nd   Male   Child            0  11
             Adult          154  14
      Female Child            0  13
             Adult           13  80
3rd   Male   Child           35  13
             Adult          387  75
      Female Child           17  14
             Adult           89  76
Crew  Male   Child            0   0
             Adult          670 192
      Female Child            0   0
             Adult            3  20
> ftable(Sex ~ Class + Age, data = x)
            Sex Male Female
Class Age                  
1st   Child        5      1
      Adult      175    144
2nd   Child       11     13
      Adult      168     93
3rd   Child       48     31
      Adult      462    165
Crew  Child        0      0
      Adult      862     23
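## Sketch: the first formula call above, Survived ~ ., is the analogue of
## the default interface with col.vars = "Survived" (all remaining
## variables become row variables):
ftable(Titanic, col.vars = "Survived")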
> 
> 
> 
> cleanEx()
> nameEx("getInitial")
> ### * getInitial
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: getInitial
> ### Title: Get Initial Parameter Estimates
> ### Aliases: getInitial getInitial.default getInitial.formula
> ###   getInitial.selfStart
> ### Keywords: models nonlinear manip
> 
> ### ** Examples
> 
> PurTrt <- Puromycin[ Puromycin$state == "treated", ]
> print(getInitial( rate ~ SSmicmen( conc, Vm, K ), PurTrt ), digits = 3)
      Vm        K 
212.6837   0.0641 
> 
> 
> 
> cleanEx()
> nameEx("glm")
> ### * glm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: glm
> ### Title: Fitting Generalized Linear Models
> ### Aliases: glm glm.fit weights.glm
> ### Keywords: models regression
> 
> ### ** Examples
> 
> ## Dobson (1990) Page 93: Randomized Controlled Trial :
> counts <- c(18,17,15,20,10,20,25,13,12)
> outcome <- gl(3,1,9)
> treatment <- gl(3,3)
> data.frame(treatment, outcome, counts) # showing data
  treatment outcome counts
1         1       1     18
2         1       2     17
3         1       3     15
4         2       1     20
5         2       2     10
6         2       3     20
7         3       1     25
8         3       2     13
9         3       3     12
> glm.D93 <- glm(counts ~ outcome + treatment, family = poisson())
> anova(glm.D93)
Analysis of Deviance Table

Model: poisson, link: log

Response: counts

Terms added sequentially (first to last)


          Df Deviance Resid. Df Resid. Dev Pr(>Chi)  
NULL                          8    10.5814           
outcome    2   5.4523         6     5.1291  0.06547 .
treatment  2   0.0000         4     5.1291  1.00000  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> ## Computing AIC [in many ways]:
> (A0 <- AIC(glm.D93))
[1] 56.76132
> (ll <- logLik(glm.D93))
'log Lik.' -23.38066 (df=5)
> A1 <- -2*c(ll) + 2*attr(ll, "df")
> A2 <- glm.D93$family$aic(counts, mu=fitted(glm.D93), wt=1) +
+         2 * length(coef(glm.D93))
> stopifnot(exprs = {
+   all.equal(A0, A1)
+   all.equal(A1, A2)
+   all.equal(A1, glm.D93$aic)
+ })
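## The same pattern gives BIC, with log(n) in place of 2 (a sketch):
stopifnot(all.equal(BIC(glm.D93),
                    -2 * c(ll) + log(nobs(glm.D93)) * attr(ll, "df")))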
> 
> 
> 
> # A Gamma example, from McCullagh & Nelder (1989, pp. 300-2)
> clotting <- data.frame(
+     u = c(5,10,15,20,30,40,60,80,100),
+     lot1 = c(118,58,42,35,27,25,21,19,18),
+     lot2 = c(69,35,26,21,18,16,13,12,12))
> summary(glm(lot1 ~ log(u), data = clotting, family = Gamma))

Call:
glm(formula = lot1 ~ log(u), family = Gamma, data = clotting)

Coefficients:
              Estimate Std. Error t value Pr(>|t|)    
(Intercept) -0.0165544  0.0009275  -17.85 4.28e-07 ***
log(u)       0.0153431  0.0004150   36.98 2.75e-09 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for Gamma family taken to be 0.002446059)

    Null deviance: 3.51283  on 8  degrees of freedom
Residual deviance: 0.01673  on 7  degrees of freedom
AIC: 37.99

Number of Fisher Scoring iterations: 3

> summary(glm(lot2 ~ log(u), data = clotting, family = Gamma))

Call:
glm(formula = lot2 ~ log(u), family = Gamma, data = clotting)

Coefficients:
              Estimate Std. Error t value Pr(>|t|)    
(Intercept) -0.0239085  0.0013265  -18.02 4.00e-07 ***
log(u)       0.0235992  0.0005768   40.91 1.36e-09 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for Gamma family taken to be 0.001813354)

    Null deviance: 3.118557  on 8  degrees of freedom
Residual deviance: 0.012672  on 7  degrees of freedom
AIC: 27.032

Number of Fisher Scoring iterations: 3

> ## Aliased ("S"ingular) -> 1 NA coefficient
> (fS <- glm(lot2 ~ log(u) + log(u^2), data = clotting, family = Gamma))

Call:  glm(formula = lot2 ~ log(u) + log(u^2), family = Gamma, data = clotting)

Coefficients:
(Intercept)       log(u)     log(u^2)  
   -0.02391      0.02360           NA  

Degrees of Freedom: 8 Total (i.e. Null);  7 Residual
Null Deviance:	    3.119 
Residual Deviance: 0.01267 	AIC: 27.03
> tools::assertError(update(fS, singular.ok=FALSE), verbose=interactive())
> ## -> .. "singular fit encountered"
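## Sketch: on the response scale, fitted() agrees with
## predict(*, type = "response") for these Gamma fits:
gm2 <- glm(lot2 ~ log(u), data = clotting, family = Gamma)
stopifnot(all.equal(fitted(gm2), predict(gm2, type = "response")))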
> 
> ## Not run: 
> ##D ## for an example of the use of a terms object as a formula
> ##D demo(glm.vr)
> ## End(Not run)
> 
> 
> cleanEx()
> nameEx("glm.control")
> ### * glm.control
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: glm.control
> ### Title: Auxiliary for Controlling GLM Fitting
> ### Aliases: glm.control
> ### Keywords: optimize models
> 
> ### ** Examples
> 
> 
> cleanEx()
> nameEx("hclust")
> ### * hclust
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: hclust
> ### Title: Hierarchical Clustering
> ### Aliases: hclust plot.hclust print.hclust
> ### Keywords: multivariate cluster
> 
> ### ** Examples
> 
> require(graphics)
> 
> ### Example 1: Violent crime rates by US state
> 
> hc <- hclust(dist(USArrests), "ave")
> plot(hc)
> plot(hc, hang = -1)
> 
> ## Do the same with centroid clustering and *squared* Euclidean distance,
> ## cut the tree into ten clusters and reconstruct the upper part of the
> ## tree from the cluster centers.
> hc <- hclust(dist(USArrests)^2, "cen")
> memb <- cutree(hc, k = 10)
> cent <- NULL
> for(k in 1:10){
+   cent <- rbind(cent, colMeans(USArrests[memb == k, , drop = FALSE]))
+ }
> hc1 <- hclust(dist(cent)^2, method = "cen", members = table(memb))
> opar <- par(mfrow = c(1, 2))
> plot(hc,  labels = FALSE, hang = -1, main = "Original Tree")
> plot(hc1, labels = FALSE, hang = -1, main = "Re-start from 10 clusters")
> par(opar)
> 
> ### Example 2: Straight-line distances among 10 US cities
> ##  Compare the results of algorithms "ward.D" and "ward.D2"
> 
> mds2 <- -cmdscale(UScitiesD)
> plot(mds2, type="n", axes=FALSE, ann=FALSE)
> text(mds2, labels=rownames(mds2), xpd = NA)
> 
> hcity.D  <- hclust(UScitiesD, "ward.D") # "wrong"
> hcity.D2 <- hclust(UScitiesD, "ward.D2")
> opar <- par(mfrow = c(1, 2))
> plot(hcity.D,  hang=-1)
> plot(hcity.D2, hang=-1)
> par(opar)
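## Sketch: the two Ward variants may group the cities differently;
## compare 3-cluster memberships via a cross-table (labels are arbitrary):
table(ward.D = cutree(hcity.D, k = 3), ward.D2 = cutree(hcity.D2, k = 3))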
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("heatmap")
> ### * heatmap
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: heatmap
> ### Title: Draw a Heat Map
> ### Aliases: heatmap
> ### Keywords: hplot
> 
> ### ** Examples
> 
> require(graphics); require(grDevices)
> x  <- as.matrix(mtcars)
> rc <- rainbow(nrow(x), start = 0, end = .3)
> cc <- rainbow(ncol(x), start = 0, end = .3)
> hv <- heatmap(x, col = cm.colors(256), scale = "column",
+               RowSideColors = rc, ColSideColors = cc, margins = c(5,10),
+               xlab = "specification variables", ylab =  "Car Models",
+               main = "heatmap(<Mtcars data>, ..., scale = \"column\")")
> utils::str(hv) # the two re-ordering index vectors
List of 4
 $ rowInd: int [1:32] 31 17 16 15 5 25 29 24 7 6 ...
 $ colInd: int [1:11] 2 9 8 11 6 5 10 7 1 4 ...
 $ Rowv  : NULL
 $ Colv  : NULL
> 
> ## no column dendrogram (nor reordering) at all:
> heatmap(x, Colv = NA, col = cm.colors(256), scale = "column",
+         RowSideColors = rc, margins = c(5,10),
+         xlab = "specification variables", ylab =  "Car Models",
+         main = "heatmap(<Mtcars data>, ..., scale = \"column\")")
> ## Don't show: 
> ## no row dendrogram (nor reordering) at all:
> heatmap(x, Rowv = NA, col = cm.colors(256), scale = "column",
+         ColSideColors = cc, margins = c(5,10),
+         xlab = "xlab", ylab =  "ylab")  # no main
> ## End(Don't show)
> ## "no nothing"
> heatmap(x, Rowv = NA, Colv = NA, scale = "column",
+         main = "heatmap(*, NA, NA) ~= image(t(x))")
> 
> ## Demonstration of the 'scale' argument:
> ## The only change in the code is the 'scale' arg.
> ## The only visible change is in the color scale on the heatmap
> ## (the original data are not scaled).
> 
> heatmap(x, col = terrain.colors(128), scale = "column",
+         RowSideColors = rc,
+         ColSideColors = cc,
+         margins = c(5,10),
+         main = "heatmap(<Mtcars data>, ..., scale = \"column\")")
> heatmap(x, col = terrain.colors(128), scale = "none",
+         RowSideColors = rc,
+         ColSideColors = cc,
+         margins = c(5,10),
+         main = "heatmap(<Mtcars data>, ..., scale = \"none\")")
> 
> 
> round(Ca <- cor(attitude), 2)
           rating complaints privileges learning raises critical advance
rating       1.00       0.83       0.43     0.62   0.59     0.16    0.16
complaints   0.83       1.00       0.56     0.60   0.67     0.19    0.22
privileges   0.43       0.56       1.00     0.49   0.45     0.15    0.34
learning     0.62       0.60       0.49     1.00   0.64     0.12    0.53
raises       0.59       0.67       0.45     0.64   1.00     0.38    0.57
critical     0.16       0.19       0.15     0.12   0.38     1.00    0.28
advance      0.16       0.22       0.34     0.53   0.57     0.28    1.00
> symnum(Ca) # simple graphic
           rt cm p l rs cr a
rating     1                
complaints +  1             
privileges .  .  1          
learning   ,  .  . 1        
raises     .  ,  . , 1      
critical             .  1   
advance          . . .     1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> heatmap(Ca,               symm = TRUE, margins = c(6,6)) # with reorder()
> heatmap(Ca, Rowv = FALSE, symm = TRUE, margins = c(6,6)) # _NO_ reorder()
> ## slightly artificial with color bar, without and with ordering:
> cc <- rainbow(nrow(Ca))
> heatmap(Ca, Rowv = FALSE, symm = TRUE, RowSideColors = cc, ColSideColors = cc,
+ 	margins = c(6,6))
> heatmap(Ca,		symm = TRUE, RowSideColors = cc, ColSideColors = cc,
+ 	margins = c(6,6))
> 
> ## For variable clustering, rather use a distance based on cor():
> symnum( cU <- cor(USJudgeRatings) )
     CO I DM DI CF DE PR F O W PH R
CONT 1                             
INTG    1                          
DMNR    B 1                        
DILG    + +  1                     
CFMG    + +  B  1                  
DECI    + +  B  B  1               
PREP    + +  B  B  B  1            
FAMI    + +  B  *  *  B  1         
ORAL    * *  B  B  *  B  B 1       
WRIT    * +  B  *  *  B  B B 1     
PHYS    , ,  +  +  +  +  + + + 1   
RTEN    * *  *  *  *  B  * B B *  1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> 
> hU <- heatmap(cU, Rowv = FALSE, symm = TRUE, col = topo.colors(16),
+              distfun = function(c) as.dist(1 - c), keep.dendro = TRUE)
> ## The Correlation matrix with same reordering:
> round(100 * cU[hU[[1]], hU[[2]]])
     CONT INTG DMNR PHYS DILG CFMG DECI RTEN ORAL WRIT PREP FAMI
CONT  100  -13  -15    5    1   14    9   -3   -1   -4    1   -3
INTG  -13  100   96   74   87   81   80   94   91   91   88   87
DMNR  -15   96  100   79   84   81   80   94   91   89   86   84
PHYS    5   74   79  100   81   88   87   91   89   86   85   84
DILG    1   87   84   81  100   96   96   93   95   96   98   96
CFMG   14   81   81   88   96  100   98   93   95   94   96   94
DECI    9   80   80   87   96   98  100   92   95   95   96   94
RTEN   -3   94   94   91   93   93   92  100   98   97   95   94
ORAL   -1   91   91   89   95   95   95   98  100   99   98   98
WRIT   -4   91   89   86   96   94   95   97   99  100   99   99
PREP    1   88   86   85   98   96   96   95   98   99  100   99
FAMI   -3   87   84   84   96   94   94   94   98   99   99  100
> ## The column dendrogram:
> utils::str(hU$Colv)
--[dendrogram w/ 2 branches and 12 members at h = 1.15]
  |--leaf "CONT" 
  `--[dendrogram w/ 2 branches and 11 members at h = 0.258]
     |--[dendrogram w/ 2 branches and 2 members at h = 0.0354]
     |  |--leaf "INTG" 
     |  `--leaf "DMNR" 
     `--[dendrogram w/ 2 branches and 9 members at h = 0.187]
        |--leaf "PHYS" 
        `--[dendrogram w/ 2 branches and 8 members at h = 0.075]
           |--[dendrogram w/ 2 branches and 3 members at h = 0.0438]
           |  |--leaf "DILG" 
           |  `--[dendrogram w/ 2 branches and 2 members at h = 0.0189]
           |     |--leaf "CFMG" 
           |     `--leaf "DECI" 
           `--[dendrogram w/ 2 branches and 5 members at h = 0.0584]
              |--leaf "RTEN" 
              `--[dendrogram w/ 2 branches and 4 members at h = 0.0187]
                 |--[dendrogram w/ 2 branches and 2 members at h = 0.00657]
                 |  |--leaf "ORAL" 
                 |  `--leaf "WRIT" 
                 `--[dendrogram w/ 2 branches and 2 members at h = 0.0101]
                    |--leaf "PREP" 
                    `--leaf "FAMI" 
> 
> 
> 
> cleanEx()
> nameEx("identify.hclust")
> ### * identify.hclust
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: identify.hclust
> ### Title: Identify Clusters in a Dendrogram
> ### Aliases: identify.hclust
> ### Keywords: cluster iplot
> 
> ### ** Examples
> ## Not run: 
> ##D require(graphics)
> ##D 
> ##D hca <- hclust(dist(USArrests))
> ##D plot(hca)
> ##D (x <- identify(hca)) ##  Terminate with 2nd mouse button !!
> ##D 
> ##D hci <- hclust(dist(iris[,1:4]))
> ##D plot(hci)
> ##D identify(hci, function(k) print(table(iris[k,5])))
> ##D 
> ##D # open a new device (one for dendrogram, one for bars):
> ##D dev.new() # << make that narrow (& small)
> ##D           # and *beside* 1st one
> ##D nD <- dev.cur()            # to be for the barplot
> ##D dev.set(dev.prev())  # old one for dendrogram
> ##D plot(hci)
> ##D ## select subtrees in dendrogram and "see" the species distribution:
> ##D identify(hci, function(k) barplot(table(iris[k,5]), col = 2:4), DEV.FUN = nD)
> ## End(Not run)
> 
> 
> cleanEx()
> nameEx("influence.measures")
> ### * influence.measures
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: influence.measures
> ### Title: Regression Deletion Diagnostics
> ### Aliases: influence.measures hat hatvalues hatvalues.lm rstandard
> ###   rstandard.lm rstandard.glm rstudent rstudent.lm rstudent.glm dfbeta
> ###   dfbeta.lm dfbetas dfbetas.lm dffits covratio cooks.distance
> ###   cooks.distance.lm cooks.distance.glm
> ### Keywords: regression
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## Analysis of the life-cycle savings data
> ## given in Belsley, Kuh and Welsch.
> lm.SR <- lm(sr ~ pop15 + pop75 + dpi + ddpi, data = LifeCycleSavings)
> 
> inflm.SR <- influence.measures(lm.SR)
> which(apply(inflm.SR$is.inf, 1, any))
        Chile United States        Zambia         Libya 
            7            44            46            49 
> # which observations 'are' influential
> summary(inflm.SR) # only these
Potentially influential observations of
	 lm(formula = sr ~ pop15 + pop75 + dpi + ddpi, data = LifeCycleSavings) :

              dfb.1_ dfb.pp15 dfb.pp75 dfb.dpi dfb.ddpi dffit   cov.r   cook.d
Chile         -0.20   0.13     0.22    -0.02    0.12    -0.46    0.65_*  0.04 
United States  0.07  -0.07     0.04    -0.23   -0.03    -0.25    1.66_*  0.01 
Zambia         0.16  -0.08    -0.34     0.09    0.23     0.75    0.51_*  0.10 
Libya          0.55  -0.48    -0.38    -0.02   -1.02_*  -1.16_*  2.09_*  0.27 
              hat    
Chile          0.04  
United States  0.33_*
Zambia         0.06  
Libya          0.53_*
> plot(rstudent(lm.SR) ~ hatvalues(lm.SR)) # recommended by some
> plot(lm.SR, which = 5) # an enhanced version of that via plot(<lm>)
> 
> ## The 'infl' argument is not needed, but avoids recomputation:
> rs <- rstandard(lm.SR)
> iflSR <- influence(lm.SR)
> all.equal(rs, rstandard(lm.SR, infl = iflSR), tolerance = 1e-10)
[1] TRUE
> ## to "see" the larger values:
> 1000 * round(dfbetas(lm.SR, infl = iflSR), 3)
               (Intercept) pop15 pop75  dpi  ddpi
Australia               12   -10   -27   45     0
Austria                -10     6    41  -37    -8
Belgium                -64    51   121  -35    -7
Bolivia                  6   -13   -23   32    41
Brazil                  90   -62  -179  120    68
Canada                   5    -7    10  -35    -3
Chile                 -199   133   220  -20   120
China                   21    -6   -83   52   111
Colombia                39   -52   -25    2     9
Costa Rica            -234   284   142   56   -33
Denmark                -41    21    47  152    49
Ecuador                 72   -95   -61   20    48
Finland               -113   111   117  -44   -17
France                -166   147   219  -29    24
Germany                 -8     8     8   -7     0
Greece                -148   164    29  157   -60
Guatamala               16   -55     6    6    97
Honduras                -2    10   -10    8    -2
Iceland                248  -274  -233 -126   185
India                   21   -16   -14  -14   -19
Ireland               -310   296   482 -257   -93
Italy                   66   -71     3  -70   -29
Japan                  640  -656  -674  146   389
Korea                 -169   135   219    5  -169
Luxembourg             -68    69    44  -28    49
Malta                   37   -49     8  -87   153
Norway                   2     0    -6  -16    -1
Netherlands             14   -17   -12    4    23
New Zealand            -60    65    94  -26   -65
Nicaragua              -12    18    10   -5   -10
Panama                  28   -53    14  -35    -8
Paraguay              -232   164   158  144   270
Peru                   -72   147    91  -86  -287
Philippines           -157   227   157 -111  -171
Portugal               -21    26    -4   40   -28
South Africa            22   -20    -7  -20   -16
South Rhodesia         144  -135   -92  -70   -58
Spain                  -30    31     4   35     5
Sweden                 101   -82   -62 -255   -13
Switzerland             43   -46   -44   91   -19
Turkey                 -11   -12    26    2    25
Tunisia                 74  -105   -77   44   103
United Kingdom          47   -36  -171  126   100
United States           69   -73    37 -233   -33
Venezuela              -51   101   -34  114  -124
Zambia                 164   -79  -339   94   228
Jamaica                110  -100   -57   -7  -295
Uruguay               -134   129    30  131   100
Libya                  551  -483  -380  -19 -1024
Malaysia                37   -61    32  -50   -72
> cat("PRESS :"); (PRESS <- sum( rstandard(lm.SR, type = "predictive")^2 ))
PRESS :[1] 798.939
> stopifnot(all.equal(PRESS, sum( (residuals(lm.SR) / (1 - iflSR$hat))^2)))
> 
> ## Show that "PRE-residuals"  ==  L.O.O. Crossvalidation (CV) errors:
> X <- model.matrix(lm.SR)
> y <- model.response(model.frame(lm.SR))
> ## Leave-one-out CV least-squares prediction errors (relatively fast)
> rCV <- vapply(seq_len(nrow(X)), function(i)
+               y[i] - X[i,] %*% .lm.fit(X[-i,], y[-i])$coefficients,
+               numeric(1))
> ## are the same as the *faster* rstandard(*, "pred") :
> stopifnot(all.equal(rCV, unname(rstandard(lm.SR, type = "predictive"))))
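## Another classical identity (a sketch): DFFITS from the externally
## studentized residuals and the hat values:
h <- hatvalues(lm.SR)
stopifnot(all.equal(dffits(lm.SR), rstudent(lm.SR) * sqrt(h / (1 - h))))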
> 
> 
> ## Huber's data [Atkinson 1985]
> xh <- c(-4:0, 10)
> yh <- c(2.48, .73, -.04, -1.44, -1.32, 0)
> lmH <- lm(yh ~ xh)
> im <- influence.measures(lmH)
> is.inf <- apply(im$is.inf, 1, any)
> plot(xh,yh, main = "Huber's data: L.S. line and influential obs.")
> abline(lmH); points(xh[is.inf], yh[is.inf], pch = 20, col = 2)
> 
> ## Irwin's data [Williams 1987]
> xi <- 1:5
> yi <- c(0,2,14,19,30)    # number of mice responding to dose xi
> mi <- rep(40, 5)         # number of mice exposed
> glmI <- glm(cbind(yi, mi -yi) ~ xi, family = binomial)
> signif(cooks.distance(glmI), 3)   # ~= Ci in Table 3, p.184
     1      2      3      4      5 
0.2520 0.2610 1.2900 0.0845 0.3640 
> imI <- influence.measures(glmI)
> stopifnot(all.equal(imI$infmat[,"cook.d"],
+           cooks.distance(glmI)))
> 
> 
> 
> cleanEx()
> nameEx("integrate")
> ### * integrate
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: integrate
> ### Title: Integration of One-Dimensional Functions
> ### Aliases: integrate print.integrate
> ### Keywords: math utilities
> 
> ### ** Examples
> 
> integrate(dnorm, -1.96, 1.96)
0.9500042 with absolute error < 1e-11
> integrate(dnorm, -Inf, Inf)
1 with absolute error < 9.4e-05
> 
> ## a slowly-convergent integral
> integrand <- function(x) {1/((x+1)*sqrt(x))}
> integrate(integrand, lower = 0, upper = Inf)
3.141593 with absolute error < 2.7e-05
> 
> ## don't do this if you really want the integral from 0 to Inf
> integrate(integrand, lower = 0, upper = 10)
2.529038 with absolute error < 3e-04
> integrate(integrand, lower = 0, upper = 100000)
3.135268 with absolute error < 4.2e-07
> integrate(integrand, lower = 0, upper = 1000000, stop.on.error = FALSE)
failed with message ‘the integral is probably divergent’
> 
> ## some functions do not handle vector input properly
> f <- function(x) 2.0
> try(integrate(f, 0, 1))
Error in integrate(f, 0, 1) : 
  evaluation of function gave a result of wrong length
> integrate(Vectorize(f), 0, 1)  ## correct
2 with absolute error < 2.2e-14
> integrate(function(x) rep(2.0, length(x)), 0, 1)  ## correct
2 with absolute error < 2.2e-14
> 
> ## integrate can fail if misused
> integrate(dnorm, 0, 2)
0.4772499 with absolute error < 5.3e-15
> integrate(dnorm, 0, 20)
0.5 with absolute error < 3.7e-05
> integrate(dnorm, 0, 200)
0.5 with absolute error < 1.6e-07
> integrate(dnorm, 0, 2000)
0.5 with absolute error < 4.4e-06
> integrate(dnorm, 0, 20000) ## "fails" on many systems -- "wrongly" giving '0' 
0 with absolute error < 0
> integrate(dnorm, 0, Inf)   ## works
0.5 with absolute error < 4.7e-05
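## Closed-form checks for the calls above (a sketch): the normal
## integrals via pnorm(), and the slowly-convergent integrand, whose
## exact value is pi (substitute x = t^2):
abs(integrate(dnorm, -1.96, 1.96)$value - (pnorm(1.96) - pnorm(-1.96)))
abs(integrate(integrand, lower = 0, upper = Inf)$value - pi)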
> ## Don't show: 
> tools::assertError(
+ ## End(Don't show)
+ integrate(dnorm, 0:1, 20) #-> error!
+ ## "silently" gave  integrate(dnorm, 0, 20)  in earlier versions of R
+ ## Don't show: 
+  , verbose=TRUE)
Asserted error: length(lower) == 1 is not TRUE
> ## End(Don't show)
> 
> 
> 
> cleanEx()
> nameEx("interaction.plot")
> ### * interaction.plot
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: interaction.plot
> ### Title: Two-way Interaction Plot
> ### Aliases: interaction.plot
> ### Keywords: hplot
> 
> ### ** Examples
> 
> require(graphics)
> 
> with(ToothGrowth, {
+ interaction.plot(dose, supp, len, fixed = TRUE)
+ dose <- ordered(dose)
+ interaction.plot(dose, supp, len, fixed = TRUE,
+                  col = 2:3, leg.bty = "o", xtick = TRUE)
+ interaction.plot(dose, supp, len, fixed = TRUE, col = 2:3, type = "p")
+ })
> 
> with(OrchardSprays, {
+   interaction.plot(treatment, rowpos, decrease)
+   interaction.plot(rowpos, treatment, decrease, cex.axis = 0.8)
+   ## order the rows by their mean effect
+   rowpos <- factor(rowpos,
+                    levels = sort.list(tapply(decrease, rowpos, mean)))
+   interaction.plot(rowpos, treatment, decrease, col = 2:9, lty = 1)
+ })
> 
> 
> 
> 
> cleanEx()
> nameEx("is.empty")
> ### * is.empty
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: is.empty.model
> ### Title: Test if a Model's Formula is Empty
> ### Aliases: is.empty.model
> ### Keywords: models
> 
> ### ** Examples
> 
> y <- rnorm(20)
> is.empty.model(y ~ 0)
[1] TRUE
> is.empty.model(y ~ -1)
[1] TRUE
> is.empty.model(lm(y ~ 0))
[1] TRUE
> 
> 
> 
> cleanEx()
> nameEx("isoreg")
> ### * isoreg
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: isoreg
> ### Title: Isotonic / Monotone Regression
> ### Aliases: isoreg
> ### Keywords: regression smooth
> 
> ### ** Examples
> 
> require(graphics)
> 
> (ir <- isoreg(c(1,0,4,3,3,5,4,2,0)))
Isotonic regression from isoreg(x = c(1, 0, 4, 3, 3, 5, 4, 2, 0)),
  with 2 knots / breaks at obs.nr. 2 9 ;
  initially ordered 'x'
  and further components List of 4
 $ x : num [1:9] 1 2 3 4 5 6 7 8 9
 $ y : num [1:9] 1 0 4 3 3 5 4 2 0
 $ yf: num [1:9] 0.5 0.5 3 3 3 3 3 3 3
 $ yc: num [1:10] 0 1 1 5 8 11 16 20 22 22
> plot(ir, plot.type = "row")
> 
> (ir3 <- isoreg(y3 <- c(1,0,4,3,3,5,4,2, 3))) # last "3", not "0"
Isotonic regression from isoreg(x = y3 <- c(1, 0, 4, 3, 3, 5, 4, 2, 3)),
  with 3 knots / breaks at obs.nr. 2 5 9 ;
  initially ordered 'x'
  and further components List of 4
 $ x : num [1:9] 1 2 3 4 5 6 7 8 9
 $ y : num [1:9] 1 0 4 3 3 5 4 2 3
 $ yf: num [1:9] 0.5 0.5 3.33 3.33 3.33 ...
 $ yc: num [1:10] 0 1 1 5 8 11 16 20 22 25
> (fi3 <- as.stepfun(ir3))
Step function
Call: isoreg(x = y3 <- c(1, 0, 4, 3, 3, 5, 4, 2, 3))
 x[1:3] =      2,      5,      9
4 plateau levels =    0.5,    0.5, 3.3333,    3.5
> (ir4 <- isoreg(1:10, y4 <- c(5, 9, 1:2, 5:8, 3, 8)))
Isotonic regression from isoreg(x = 1:10, y = y4 <- c(5, 9, 1:2, 5:8, 3, 8)),
  with 5 knots / breaks at obs.nr. 4 5 6 9 10 ;
  initially ordered 'x'
  and further components List of 4
 $ x : num [1:10] 1 2 3 4 5 6 7 8 9 10
 $ y : num [1:10] 5 9 1 2 5 6 7 8 3 8
 $ yf: num [1:10] 4.25 4.25 4.25 4.25 5 6 6 6 6 8
 $ yc: num [1:11] 0 5 14 15 17 22 28 35 43 46 ...
> cat(sprintf("R^2 = %.2f\n",
+             1 - sum(residuals(ir4)^2) / ((10-1)*var(y4))))
R^2 = 0.21
> 
> ## If you are interested in the knots alone :
> with(ir4, cbind(iKnots, yf[iKnots]))
     iKnots     
[1,]      4 4.25
[2,]      5 5.00
[3,]      6 6.00
[4,]      9 6.00
[5,]     10 8.00
> 
> ## Example of unordered x[] with ties:
> x <- sample((0:30)/8)
> y <- exp(x)
> x. <- round(x) # ties!
> plot(m <- isoreg(x., y))
> stopifnot(all.equal(with(m, yf[iKnots]),
+                     as.vector(tapply(y, x., mean))))
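## Sketch: by construction the fitted values are non-decreasing
## (in increasing order of x):
stopifnot(!is.unsorted(ir$yf), !is.unsorted(ir4$yf))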
> 
> 
> 
> cleanEx()
> nameEx("kernapply")
> ### * kernapply
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: kernapply
> ### Title: Apply Smoothing Kernel
> ### Aliases: kernapply kernapply.default kernapply.ts kernapply.tskernel
> ###   kernapply.vector
> ### Keywords: ts
> 
> ### ** Examples
> 
> ## see 'kernel' for examples
> 
> 
> 
> cleanEx()
> nameEx("kernel")
> ### * kernel
> 
> flush(stderr()); flush(stdout())
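## A by-hand cross-check (a sketch): with no ties within any block, the
## Friedman statistic is  12/(n*k*(k+1)) * sum(Rj^2) - 3*n*(k+1),  where
## the Rj are the k group rank sums over the n blocks; here for the
## warpbreaks summary above:
Rj <- rowSums(sapply(split(wb$x, wb$t), rank)) # rank wool A vs B within each tension
n <- nlevels(wb$t); k <- nlevels(wb$w)
12 / (n * k * (k + 1)) * sum(Rj^2) - 3 * n * (k + 1) # 0.33333, as reported above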
> 
> ### Name: kernel
> ### Title: Smoothing Kernel Objects
> ### Aliases: kernel bandwidth.kernel df.kernel is.tskernel plot.tskernel
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## Demonstrate a simple trading strategy for the German stock index
> ## DAX, a financial time series.
> x <- EuStockMarkets[,1]
> k1 <- kernel("daniell", 50)  # a long moving average
> k2 <- kernel("daniell", 10)  # and a short one
> plot(k1)
> plot(k2)
> x1 <- kernapply(x, k1)
> x2 <- kernapply(x, k2)
> plot(x)
> lines(x1, col = "red")    # go long if the short crosses the long upwards
> lines(x2, col = "green")  # and go short otherwise
> 
> ## More interesting kernels
> kd <- kernel("daniell", c(3, 3))
> kd # note the unusual indexing
Daniell(3,3) 
coef[-6] = 0.02041
coef[-5] = 0.04082
coef[-4] = 0.06122
coef[-3] = 0.08163
coef[-2] = 0.10204
coef[-1] = 0.12245
coef[ 0] = 0.14286
coef[ 1] = 0.12245
coef[ 2] = 0.10204
coef[ 3] = 0.08163
coef[ 4] = 0.06122
coef[ 5] = 0.04082
coef[ 6] = 0.02041
> kd[-2:2]
[1] 0.1020408 0.1224490 0.1428571 0.1224490 0.1020408
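## Sketch: the kernel's equivalent bandwidth and degrees of freedom
## (both helpers are among this page's aliases):
bandwidth.kernel(kd)
df.kernel(kd)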
> plot(kernel("fejer", 100, r = 6))
> plot(kernel("modified.daniell", c(7,5,3)))
> 
> # Reproduce example 10.4.3 from Brockwell and Davis (1991)
> spectrum(sunspot.year, kernel = kernel("daniell", c(11,7,3)), log = "no")
> 
> 
> 
> cleanEx()
> nameEx("kmeans")
> ### * kmeans
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: kmeans
> ### Title: K-Means Clustering
> ### Aliases: kmeans print.kmeans fitted.kmeans
> ### Keywords: multivariate cluster
> 
> ### ** Examples
> 
> require(graphics)
> 
> # a 2-dimensional example
> x <- rbind(matrix(rnorm(100, sd = 0.3), ncol = 2),
+            matrix(rnorm(100, mean = 1, sd = 0.3), ncol = 2))
> colnames(x) <- c("x", "y")
> (cl <- kmeans(x, 2))
K-means clustering with 2 clusters of sizes 49, 51

Cluster means:
           x          y
1 0.02149367 0.02121248
2 0.94443633 1.01712793

Clustering vector:
  [1] 1 1 1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
 [38] 1 1 1 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2
 [75] 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2

Within cluster sum of squares by cluster:
[1] 6.525480 8.392416
 (between_SS / total_SS =  75.5 %)

Available components:

[1] "cluster"      "centers"      "totss"        "withinss"     "tot.withinss"
[6] "betweenss"    "size"         "iter"         "ifault"      
> plot(x, col = cl$cluster)
> points(cl$centers, col = 1:2, pch = 8, cex = 2)
> 
> # sum of squares
> ss <- function(x) sum(scale(x, scale = FALSE)^2)
> 
> ## cluster centers "fitted" to each obs.:
> fitted.x <- fitted(cl);  head(fitted.x)
           x          y
1 0.02149367 0.02121248
1 0.02149367 0.02121248
1 0.02149367 0.02121248
1 0.02149367 0.02121248
1 0.02149367 0.02121248
1 0.02149367 0.02121248
> resid.x <- x - fitted(cl)
> 
> ## Equalities : ----------------------------------
> cbind(cl[c("betweenss", "tot.withinss", "totss")], # the same two columns
+          c(ss(fitted.x), ss(resid.x),    ss(x)))
             [,1]     [,2]    
betweenss    46.07333 46.07333
tot.withinss 14.9179  14.9179 
totss        60.99123 60.99123
> stopifnot(all.equal(cl$ totss,        ss(x)),
+ 	  all.equal(cl$ tot.withinss, ss(resid.x)),
+ 	  ## these three are the same:
+ 	  all.equal(cl$ betweenss,    ss(fitted.x)),
+ 	  all.equal(cl$ betweenss, cl$totss - cl$tot.withinss),
+ 	  ## and hence also
+ 	  all.equal(ss(x), ss(fitted.x) + ss(resid.x))
+ 	  )
> 
> kmeans(x,1)$withinss # trivial one-cluster, (its W.SS == ss(x))
[1] 60.99123
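## Sketch: fitted(*, method = "classes") returns each point's assigned
## cluster, i.e. the 'cluster' component:
stopifnot(identical(fitted(cl, method = "classes"), cl$cluster))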
> 
> ## random starts do help here with too many clusters
> ## (and are often recommended anyway!):
> ## The ordering of the clusters may be platform-dependent.
> ## IGNORE_RDIFF_BEGIN
> (cl <- kmeans(x, 5, nstart = 25))
K-means clustering with 5 clusters of sizes 12, 15, 25, 24, 24

Cluster means:
           x          y
1  1.3290081  1.1185534
2  0.8609139  1.3145869
3 -0.1096832  0.2106891
4  0.1581362 -0.1761590
5  0.8043520  0.7805033

Clustering vector:
  [1] 3 4 3 4 3 3 4 4 3 3 5 4 3 3 4 3 4 3 4 3 4 4 3 3 4 3 4 3 3 4 4 4 3 4 3 3 3
 [38] 4 4 4 4 3 3 3 3 3 4 4 4 4 2 5 5 5 5 1 1 1 5 1 2 5 1 2 5 2 5 5 1 2 2 1 2 5
 [75] 5 1 2 2 2 2 5 2 1 5 1 5 2 5 5 5 5 1 5 2 5 5 1 5 2 5

Within cluster sum of squares by cluster:
[1] 1.0314888 0.7104553 2.5330710 1.2816507 1.5056575
 (between_SS / total_SS =  88.4 %)

Available components:

[1] "cluster"      "centers"      "totss"        "withinss"     "tot.withinss"
[6] "betweenss"    "size"         "iter"         "ifault"      
> ## IGNORE_RDIFF_END
> plot(x, col = cl$cluster)
> points(cl$centers, col = 1:5, pch = 8)
> 
> 
> 
> cleanEx()
> nameEx("kruskal.test")
> ### * kruskal.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: kruskal.test
> ### Title: Kruskal-Wallis Rank Sum Test
> ### Aliases: kruskal.test kruskal.test.default kruskal.test.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Hollander & Wolfe (1973), p. 116.
> ## Mucociliary efficiency from the rate of removal of dust in normal
> ##  subjects, subjects with obstructive airway disease, and subjects
> ##  with asbestosis.
> x <- c(2.9, 3.0, 2.5, 2.6, 3.2) # normal subjects
> y <- c(3.8, 2.7, 4.0, 2.4)      # with obstructive airway disease
> z <- c(2.8, 3.4, 3.7, 2.2, 2.0) # with asbestosis
> kruskal.test(list(x, y, z))

	Kruskal-Wallis rank sum test

data:  list(x, y, z)
Kruskal-Wallis chi-squared = 0.77143, df = 2, p-value = 0.68

> ## Equivalently,
> x <- c(x, y, z)
> g <- factor(rep(1:3, c(5, 4, 5)),
+             labels = c("Normal subjects",
+                        "Subjects with obstructive airway disease",
+                        "Subjects with asbestosis"))
> kruskal.test(x, g)

	Kruskal-Wallis rank sum test

data:  x and g
Kruskal-Wallis chi-squared = 0.77143, df = 2, p-value = 0.68

> 
> ## Formula interface.
> require(graphics)
> boxplot(Ozone ~ Month, data = airquality)
> kruskal.test(Ozone ~ Month, data = airquality)

	Kruskal-Wallis rank sum test

data:  Ozone by Month
Kruskal-Wallis chi-squared = 29.267, df = 4, p-value = 6.901e-06

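## A natural follow-up (a sketch): pairwise month comparisons with a
## multiplicity adjustment, via pairwise.wilcox.test():
pairwise.wilcox.test(airquality$Ozone, airquality$Month, p.adjust.method = "BH")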
> 
> 
> 
> cleanEx()
> nameEx("ks.test")
> ### * ks.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Encoding: UTF-8
> ### Name: ks.test
> ### Title: Kolmogorov-Smirnov Tests
> ### Aliases: ks.test ks.test.default ks.test.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> require("graphics")
> 
> x <- rnorm(50)
> y <- runif(30)
> # Do x and y come from the same distribution?
> ks.test(x, y)

	Exact two-sample Kolmogorov-Smirnov test

data:  x and y
D = 0.48, p-value = 0.0002033
alternative hypothesis: two-sided

> # Does x come from a shifted gamma distribution with shape 3 and rate 2?
> ks.test(x+2, "pgamma", 3, 2) # two-sided, exact

	Exact one-sample Kolmogorov-Smirnov test

data:  x + 2
D = 0.40962, p-value = 4.227e-08
alternative hypothesis: two-sided

> ks.test(x+2, "pgamma", 3, 2, exact = FALSE)

	Asymptotic one-sample Kolmogorov-Smirnov test

data:  x + 2
D = 0.40962, p-value = 1.033e-07
alternative hypothesis: two-sided

> ks.test(x+2, "pgamma", 3, 2, alternative = "gr")

	Exact one-sample Kolmogorov-Smirnov test

data:  x + 2
D^+ = 0.039998, p-value = 0.8302
alternative hypothesis: the CDF of x lies above the null hypothesis

> 
> # test if x is stochastically larger than x2
> x2 <- rnorm(50, -1)
> plot(ecdf(x), xlim = range(c(x, x2)))
> plot(ecdf(x2), add = TRUE, lty = "dashed")
> t.test(x, x2, alternative = "g")

	Welch Two Sample t-test

data:  x and x2
t = 5.6742, df = 96.85, p-value = 7.242e-08
alternative hypothesis: true difference in means is greater than 0
95 percent confidence interval:
 0.7069751       Inf
sample estimates:
 mean of x  mean of y 
 0.1004483 -0.8990693 

> wilcox.test(x, x2, alternative = "g")

	Wilcoxon rank sum test with continuity correction

data:  x and x2
W = 1983, p-value = 2.212e-07
alternative hypothesis: true location shift is greater than 0

> ks.test(x, x2, alternative = "l")

	Exact two-sample Kolmogorov-Smirnov test

data:  x and x2
D^- = 0.5, p-value = 2.404e-06
alternative hypothesis: the CDF of x lies below that of y

> 
> # with ties, example from Schröer and Trenkler (1995)
> # D = 3/7, p = 8/33 = 0.242424..
> ks.test(c(1, 2, 2, 3, 3),
+         c(1, 2, 3, 3, 4, 5, 6))# -> exact

	Exact two-sample Kolmogorov-Smirnov test

data:  c(1, 2, 2, 3, 3) and c(1, 2, 3, 3, 4, 5, 6)
D = 0.42857, p-value = 0.2424
alternative hypothesis: two-sided

> 
> # formula interface, see ?wilcox.test
> ks.test(Ozone ~ Month, data = airquality,
+         subset = Month %in% c(5, 8))

	Exact two-sample Kolmogorov-Smirnov test

data:  Ozone by Month
D = 0.53846, p-value = 0.0006919
alternative hypothesis: two-sided

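## Sketch: for two samples, D is the largest distance between the two
## empirical CDFs, here recomputed for the tied example above:
x1 <- c(1, 2, 2, 3, 3); y1 <- c(1, 2, 3, 3, 4, 5, 6)
t0 <- sort(unique(c(x1, y1)))
max(abs(ecdf(x1)(t0) - ecdf(y1)(t0))) # = 3/7, as reported above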
> 
> 
> 
> cleanEx()
> nameEx("ksmooth")
> ### * ksmooth
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ksmooth
> ### Title: Kernel Regression Smoother
> ### Aliases: ksmooth
> ### Keywords: smooth
> 
> ### ** Examples
> 
> require(graphics)
> 
> with(cars, {
+     plot(speed, dist)
+     lines(ksmooth(speed, dist, "normal", bandwidth = 2), col = 2)
+     lines(ksmooth(speed, dist, "normal", bandwidth = 5), col = 3)
+ })
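## Sketch: 'x.points' evaluates the smoother only at chosen points:
with(cars, ksmooth(speed, dist, "normal", bandwidth = 2, x.points = c(10, 20)))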
> 
> 
> 
> cleanEx()
> nameEx("lag")
> ### * lag
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: lag
> ### Title: Lag a Time Series
> ### Aliases: lag lag.default
> ### Keywords: ts
> 
> ### ** Examples
> 
> lag(ldeaths, 12) # starts one year earlier
      Jan  Feb  Mar  Apr  May  Jun  Jul  Aug  Sep  Oct  Nov  Dec
1973 3035 2552 2704 2554 2014 1655 1721 1524 1596 2074 2199 2512
1974 2933 2889 2938 2497 1870 1726 1607 1545 1396 1787 2076 2837
1975 2787 3891 3179 2011 1636 1580 1489 1300 1356 1653 2013 2823
1976 3102 2294 2385 2444 1748 1554 1498 1361 1346 1564 1640 2293
1977 2815 3137 2679 1969 1870 1633 1529 1366 1357 1570 1535 2491
1978 3084 2605 2573 2143 1693 1504 1461 1354 1333 1492 1781 1915
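## Sketch: lag() only shifts the time base; the observations themselves
## are unchanged:
stopifnot(start(lag(ldeaths, 12))[1] == start(ldeaths)[1] - 1,
          identical(as.vector(lag(ldeaths, 12)), as.vector(ldeaths)))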
> 
> 
> 
> cleanEx()
> nameEx("lag.plot")
> ### * lag.plot
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: lag.plot
> ### Title: Time Series Lag Plots
> ### Aliases: lag.plot
> ### Keywords: hplot ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> lag.plot(nhtemp, 8, diag.col = "forest green")
> lag.plot(nhtemp, 5, main = "Average Temperatures in New Haven")
> ## ask defaults to TRUE when we have more than one page:
> lag.plot(nhtemp, 6, layout = c(2,1), asp = NA,
+          main = "New Haven Temperatures", col.main = "blue")
> 
> ## Multivariate (but non-stationary! ...)
> lag.plot(freeny.x, lags = 3)
> 
> ## no lines for long series :
> lag.plot(sqrt(sunspots), set.lags = c(1:4, 9:12), pch = ".", col = "gold")
> 
> 
> 
> cleanEx()
> nameEx("line")
> ### * line
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: line
> ### Title: Robust Line Fitting
> ### Aliases: line residuals.tukeyline
> ### Keywords: robust regression
> 
> ### ** Examples
> 
> require(graphics)
> 
> plot(cars)
> (z <- line(cars))

Call:
line(cars)

Coefficients:
[1]  -29.333    4.667

> abline(coef(z))
> ## Tukey-Anscombe Plot :
> plot(residuals(z) ~ fitted(z), main = deparse(z$call))
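## Sketch: for comparison, the ordinary least-squares line is flatter:
coef(lm(dist ~ speed, data = cars))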
> 
> ## Andrew Siegel's pathological 9-point data, y-values multiplied by 3:
> d.AS <- data.frame(x = c(-4:3, 12), y = 3*c(rep(0,6), -5, 5, 1))
> cAS <- with(d.AS, t(sapply(1:10,
+                    function(it) line(x,y, iter=it)$coefficients)))
> dimnames(cAS) <- list(paste("it =", format(1:10)), c("intercept", "slope"))
> cAS
        intercept     slope
it =  1  0.500000  0.500000
it =  2 -0.250000 -0.250000
it =  3  0.875000  0.875000
it =  4 -0.812500 -0.812500
it =  5  1.718750  1.718750
it =  6 -2.078125 -2.078125
it =  7  2.500000  2.500000
it =  8 -2.083333 -2.083333
it =  9  2.500000  2.500000
it = 10 -2.083333 -2.083333
> ## the iterations started to oscillate, repeating iterations 7 and 8 indefinitely
> 
> 
> 
> cleanEx()
> nameEx("lm")
> ### * lm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: lm
> ### Title: Fitting Linear Models
> ### Aliases: lm print.lm
> ### Keywords: regression
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
> ## Page 9: Plant Weight Data.
> ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
> trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
> group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
> weight <- c(ctl, trt)
> lm.D9 <- lm(weight ~ group)
> lm.D90 <- lm(weight ~ group - 1) # omitting intercept
> opar <- par(mfrow = c(2,2), oma = c(0, 0, 1.1, 0))
> plot(lm.D9, las = 1)      # Residuals, Fitted, ...
> par(opar)
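## Sketch: with the intercept, the second coefficient is the treatment
## effect Trt - Ctl; without it, the coefficients are the group means:
coef(lm.D9)
coef(lm.D90)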
> ## Don't show: 
> ## model frame :
> stopifnot(identical(lm(weight ~ group, method = "model.frame"),
+                     model.frame(lm.D9)))
> ## End(Don't show)
> ### less simple examples in "See Also" above
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("lm.influence")
> ### * lm.influence
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: lm.influence
> ### Title: Regression Diagnostics
> ### Aliases: lm.influence influence influence.lm influence.glm qr.influence
> ### Keywords: regression
> 
> ### ** Examples
> 
> ## Analysis of the life-cycle savings data
> ## given in Belsley, Kuh and Welsch.
> summary(lm.SR <- lm(sr ~ pop15 + pop75 + dpi + ddpi,
+                     data = LifeCycleSavings),
+         correlation = TRUE)

Call:
lm(formula = sr ~ pop15 + pop75 + dpi + ddpi, data = LifeCycleSavings)

Residuals:
    Min      1Q  Median      3Q     Max 
-8.2422 -2.6857 -0.2488  2.4280  9.7509 

Coefficients:
              Estimate Std. Error t value Pr(>|t|)    
(Intercept) 28.5660865  7.3545161   3.884 0.000334 ***
pop15       -0.4611931  0.1446422  -3.189 0.002603 ** 
pop75       -1.6914977  1.0835989  -1.561 0.125530    
dpi         -0.0003369  0.0009311  -0.362 0.719173    
ddpi         0.4096949  0.1961971   2.088 0.042471 *  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 3.803 on 45 degrees of freedom
Multiple R-squared:  0.3385,	Adjusted R-squared:  0.2797 
F-statistic: 5.756 on 4 and 45 DF,  p-value: 0.0007904

Correlation of Coefficients:
      (Intercept) pop15 pop75 dpi  
pop15 -0.98                        
pop75 -0.81        0.77            
dpi   -0.17        0.18 -0.37      
ddpi  -0.19        0.10 -0.05  0.26

> utils::str(lmI <- lm.influence(lm.SR))
List of 4
 $ hat         : Named num [1:50] 0.0677 0.1204 0.0875 0.0895 0.0696 ...
  ..- attr(*, "names")= chr [1:50] "Australia" "Austria" "Belgium" "Bolivia" ...
 $ coefficients: num [1:50, 1:5] 0.0916 -0.0747 -0.4752 0.0429 0.6604 ...
  ..- attr(*, "dimnames")=List of 2
  .. ..$ : chr [1:50] "Australia" "Austria" "Belgium" "Bolivia" ...
  .. ..$ : chr [1:5] "(Intercept)" "pop15" "pop75" "dpi" ...
 $ sigma       : Named num [1:50] 3.84 3.84 3.83 3.84 3.81 ...
  ..- attr(*, "names")= chr [1:50] "Australia" "Austria" "Belgium" "Bolivia" ...
 $ wt.res      : Named num [1:50] 0.864 0.616 2.219 -0.698 3.553 ...
  ..- attr(*, "names")= chr [1:50] "Australia" "Austria" "Belgium" "Bolivia" ...
> 
> qRes <- qr(lm.SR) # == lm.SR $ qr
> qrI <- qr.influence(qRes, residuals(lm.SR))
> strip <- function(x) lapply(lapply(x, unname), drop)
> stopifnot(identical(strip(qrI),
+                     strip(lmI[c("hat", "sigma")])))
> 
> ## For more "user level" examples, use example(influence.measures)
> 
> 
> 
> cleanEx()
> nameEx("lm.summaries")
> ### * lm.summaries
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: lm.summaries
> ### Title: Accessing Linear Model Fits
> ### Aliases: family.lm formula.lm residuals.lm labels.lm
> ### Keywords: regression models
> 
> ### ** Examples
> 
> ## Don't show: 
> utils::example("lm", echo = FALSE)
> ## End(Don't show)
> ##-- Continuing the  lm(.) example:
> coef(lm.D90) # the bare coefficients
groupCtl groupTrt 
   5.032    4.661 
> 
> ## The 2 basic regression diagnostic plots [plot.lm(.) is preferred]
> plot(resid(lm.D90), fitted(lm.D90)) # Tukey-Anscombe's
> abline(h = 0, lty = 2, col = "gray")
> 
> qqnorm(residuals(lm.D90))
> 
> 
> 
> cleanEx()
> nameEx("lmfit")
> ### * lmfit
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: lm.fit
> ### Title: Fitter Functions for Linear Models
> ### Aliases: lm.fit lm.wfit .lm.fit
> ### Keywords: regression array
> 
> ### ** Examples
> 
> require(utils)
> set.seed(129)
> 
> n <- 7 ; p <- 2
> X <- matrix(rnorm(n * p), n, p) # no intercept!
> y <- rnorm(n)
> w <- rnorm(n)^2
> 
> str(lmw <- lm.wfit(x = X, y = y, w = w))
List of 9
 $ coefficients : Named num [1:2] -0.0432 -0.5612
  ..- attr(*, "names")= chr [1:2] "x1" "x2"
 $ residuals    : num [1:7] -0.132 -1.308 -0.256 1.468 0.439 ...
 $ fitted.values: num [1:7] 0.05 -0.5232 0.6151 -0.5766 0.0512 ...
 $ effects      : Named num [1:7] 0.804 1.722 -0.072 2.047 0.392 ...
  ..- attr(*, "names")= chr [1:7] "x1" "x2" "" "" ...
 $ weights      : num [1:7] 0.3195 0.0123 0.0569 1.8154 0.8359 ...
 $ rank         : int 2
 $ assign       : NULL
 $ qr           :List of 5
  ..$ qr   : num [1:7, 1:2] 3.0953 0.0355 0.106 0.5901 -0.5898 ...
  ..$ qraux: num [1:2] 1.2 1.02
  ..$ pivot: int [1:2] 1 2
  ..$ tol  : num 1e-07
  ..$ rank : int 2
  ..- attr(*, "class")= chr "qr"
 $ df.residual  : int 5
> 
> str(lm. <- lm.fit (x = X, y = y))
List of 8
 $ coefficients : Named num [1:2] 0.132 -0.553
  ..- attr(*, "names")= chr [1:2] "x1" "x2"
 $ residuals    : num [1:7] 0.06433 -1.14333 -0.00708 1.69606 0.09095 ...
 $ effects      : Named num [1:7] 0.791 1.476 -0.415 1.983 0.133 ...
  ..- attr(*, "names")= chr [1:7] "x1" "x2" "" "" ...
 $ rank         : int 2
 $ fitted.values: num [1:7] -0.146 -0.688 0.366 -0.804 0.399 ...
 $ assign       : NULL
 $ qr           :List of 5
  ..$ qr   : num [1:7, 1:2] 3.321 0.298 0.414 0.408 -0.601 ...
  ..$ qraux: num [1:2] 1.34 1.32
  ..$ pivot: int [1:2] 1 2
  ..$ tol  : num 1e-07
  ..$ rank : int 2
  ..- attr(*, "class")= chr "qr"
 $ df.residual  : int 5
> ## Don't show: 
>   ## These are the same calculations at C level, but a parallel BLAS
>   ## might not do them the same way twice, and it seems serial MKL does not.
>   lm.. <- .lm.fit(X,y)
>   lm.w <- .lm.fit(X*sqrt(w), y*sqrt(w))
>   id <- function(x, y) all.equal(x, y, tolerance = 1e-15, scale = 1)
>   stopifnot(id(unname(lm.$coefficients), lm..$coefficients),
+ 	    id(unname(lmw$coefficients), lm.w$coefficients))
> ## End(Don't show)
> ## fits w/o intercept:
> all.equal(unname(coef(lm(y ~ X-1))),
+           unname(coef( lm.fit(X,y))))
[1] TRUE
> all.equal(unname(coef( lm.fit(X,y))),
+                  coef(.lm.fit(X,y)))
[1] TRUE
> 
> 
> 
> cleanEx()
> nameEx("loess")
> ### * loess
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: loess
> ### Title: Local Polynomial Regression Fitting
> ### Aliases: loess
> ### Keywords: smooth loess
> 
> ### ** Examples
> 
> cars.lo <- loess(dist ~ speed, cars)
> predict(cars.lo, data.frame(speed = seq(5, 30, 1)), se = TRUE)
$fit
        1         2         3         4         5         6         7         8 
 7.797353 10.002308 12.499786 15.281082 18.446568 21.865315 25.517015 29.350386 
        9        10        11        12        13        14        15        16 
33.230660 37.167935 41.205226 45.055736 48.355889 49.824812 51.986702 56.461318 
       17        18        19        20        21        22        23        24 
61.959729 68.569313 76.316068 85.212121 95.324047        NA        NA        NA 
       25        26 
       NA        NA 

$se.fit
       1        2        3        4        5        6        7        8 
7.568120 5.945831 4.990827 4.545284 4.308639 4.115049 3.789542 3.716231 
       9       10       11       12       13       14       15       16 
3.776947 4.091747 4.709568 4.245427 4.035929 3.753410 4.004705 4.043190 
      17       18       19       20       21       22       23       24 
4.026105 4.074664 4.570818 5.954217 8.302014       NA       NA       NA 
      25       26 
      NA       NA 

$residual.scale
[1] 15.29496

$df
[1] 44.6179

> # to allow extrapolation
> cars.lo2 <- loess(dist ~ speed, cars,
+                   control = loess.control(surface = "direct"))
> predict(cars.lo2, data.frame(speed = seq(5, 30, 1)), se = TRUE)
$fit
         1          2          3          4          5          6          7 
  7.741006   9.926596  12.442424  15.281082  18.425712  21.865315  25.713413 
         8          9         10         11         12         13         14 
 29.350386  33.230660  37.167935  41.205226  45.781544  48.355889  50.067148 
        15         16         17         18         19         20         21 
 51.986702  56.445263  62.025404  68.569313  76.193111  85.053364  95.300523 
        22         23         24         25         26 
106.974661 120.092581 134.665851 150.698545 168.190283 

$se.fit
        1         2         3         4         5         6         7         8 
 7.565991  5.959097  5.012013  4.550013  4.321596  4.119331  3.939804  3.720098 
        9        10        11        12        13        14        15        16 
 3.780877  4.096004  4.714469  4.398936  4.040129  4.184257  4.008873  4.061865 
       17        18        19        20        21        22        23        24 
 4.033998  4.078904  4.584606  5.952480  8.306901 11.601911 15.792480 20.864660 
       25        26 
26.823827 33.683999 

$residual.scale
[1] 15.31087

$df
[1] 44.55085

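## Sketch: the NAs in the first set of predictions are for speeds outside
## the observed range, where the default interpolation does not predict:
range(cars$speed) # 4 25 -- so speeds 26..30 gave NA above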
> 
> 
> 
> cleanEx()
> nameEx("logLik")
> ### * logLik
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: logLik
> ### Title: Extract Log-Likelihood
> ### Aliases: logLik logLik.lm
> ### Keywords: models
> 
> ### ** Examples
> 
> x <- 1:5
> lmx <- lm(x ~ 1)
> logLik(lmx) # using print.logLik() method
'log Lik.' -8.827561 (df=2)
> utils::str(logLik(lmx))
Class 'logLik' : -8.828 (df=2)
> 
> ## lm method
> (fm1 <- lm(rating ~ ., data = attitude))

Call:
lm(formula = rating ~ ., data = attitude)

Coefficients:
(Intercept)   complaints   privileges     learning       raises     critical  
   10.78708      0.61319     -0.07305      0.32033      0.08173      0.03838  
    advance  
   -0.21706  

> logLik(fm1)
'log Lik.' -97.24991 (df=8)
> logLik(fm1, REML = TRUE)
'log Lik.' -102.6851 (df=8)
> 
> 
> 
> cleanEx()
> nameEx("loglin")
> ### * loglin
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: loglin
> ### Title: Fitting Log-Linear Models
> ### Aliases: loglin
> ### Keywords: category models
> 
> ### ** Examples
> 
> ## Model of joint independence of sex from hair and eye color.
> fm <- loglin(HairEyeColor, list(c(1, 2), c(1, 3), c(2, 3)))
5 iterations: deviation 0.04093795 
> fm
$lrt
[1] 6.761258

$pearson
[1] 6.868292

$df
[1] 9

$margin
$margin[[1]]
[1] "Hair" "Eye" 

$margin[[2]]
[1] "Hair" "Sex" 

$margin[[3]]
[1] "Eye" "Sex"


> 1 - pchisq(fm$lrt, fm$df)
[1] 0.66196
> ## Model with no three-factor interactions fits well.
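## Sketch: with fit = TRUE the fitted counts are returned as well; IPF
## makes their two-way margins match the observed ones up to the
## convergence tolerance (eps, defaulting to 0.1):
fm2 <- loglin(HairEyeColor, list(c(1, 2), c(1, 3), c(2, 3)), fit = TRUE)
max(abs(margin.table(fm2$fit, c(1, 2)) - margin.table(HairEyeColor, c(1, 2))))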
> 
> 
> 
> cleanEx()
> nameEx("lowess")
> ### * lowess
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: lowess
> ### Title: Scatter Plot Smoothing
> ### Aliases: lowess
> ### Keywords: smooth
> 
> ### ** Examples
> 
> require(graphics)
> 
> plot(cars, main = "lowess(cars)")
> lines(lowess(cars), col = 2)
> lines(lowess(cars, f = .2), col = 3)
> legend(5, 120, c(paste("f = ", c("2/3", ".2"))), lty = 1, col = 2:3)
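## Sketch: larger 'f' uses a larger fraction of the points in each local
## fit and hence smooths more strongly:
lines(lowess(cars, f = 0.9), col = 4, lty = 2)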
> 
> 
> 
> cleanEx()
> nameEx("ls.diag")
> ### * ls.diag
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ls.diag
> ### Title: Compute Diagnostics for 'lsfit' Regression Results
> ### Aliases: ls.diag
> ### Keywords: regression
> 
> ### ** Examples
> 
> ## Don't show: 
> utils::example("lm", echo = FALSE)
> ## End(Don't show)
> ##-- Using the same data as the lm(.) example:
> lsD9 <- lsfit(x = as.numeric(gl(2, 10, 20)), y = weight)
> dlsD9 <- ls.diag(lsD9)
> abs(1 - sum(dlsD9$hat) / 2) < 10*.Machine$double.eps # sum(h.ii) = p
[1] TRUE
> plot(dlsD9$hat, dlsD9$stud.res, xlim = c(0, 0.11))
> abline(h = 0, lty = 2, col = "lightgray")
> 
> 
> 
> cleanEx()
> nameEx("lsfit")
> ### * lsfit
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: lsfit
> ### Title: Find the Least Squares Fit
> ### Aliases: lsfit
> ### Keywords: regression
> 
> ### ** Examples
> 
> ## Don't show: 
> utils::example("lm", echo = FALSE)
> ## End(Don't show)
> ##-- Using the same data as the lm(.) example:
> lsD9 <- lsfit(x = unclass(gl(2, 10)), y = weight)
> ls.print(lsD9)
Residual Standard Error=0.6964
R-Square=0.0731
F-statistic (df=1, 18)=1.4191
p-value=0.249

          Estimate Std.Err t-value Pr(>|t|)
Intercept    5.403  0.4924 10.9723    0.000
X           -0.371  0.3114 -1.1913    0.249


> 
> 
> 
> cleanEx()
> nameEx("mad")
> ### * mad
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: mad
> ### Title: Median Absolute Deviation
> ### Aliases: mad
> ### Keywords: univar robust
> 
> ### ** Examples
> 
> mad(c(1:9))
[1] 2.9652
> print(mad(c(1:9),     constant = 1)) ==
+       mad(c(1:8, 100), constant = 1)       # = 2 ; TRUE
[1] 2
[1] TRUE
> x <- c(1,2,3,5,7,8)
> sort(abs(x - median(x)))
[1] 1 1 2 3 3 4
> c(mad(x, constant = 1),
+   mad(x, constant = 1, low = TRUE),
+   mad(x, constant = 1, high = TRUE))
[1] 2.5 2.0 3.0
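## The default constant 1.4826 is approximately 1/qnorm(3/4), which makes
## mad() consistent for the standard deviation at normal samples (sketch):
c(1 / qnorm(3/4), mad(1:9) / mad(1:9, constant = 1))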
> 
> 
> 
> cleanEx()
> nameEx("mahalanobis")
> ### * mahalanobis
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: mahalanobis
> ### Title: Mahalanobis Distance
> ### Aliases: mahalanobis
> ### Keywords: multivariate
> 
> ### ** Examples
> 
> require(graphics)
> 
> ma <- cbind(1:6, 1:3)
> (S <-  var(ma))
     [,1] [,2]
[1,]  3.5  0.8
[2,]  0.8  0.8
> mahalanobis(c(0, 0), 1:2, S)
[1] 5.37037
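> ## the defining quadratic form, computed directly (a sketch, not run here):
> ## Not run: 
> ##D d <- c(0, 0) - 1:2
> ##D drop(t(d) %*% solve(S) %*% d)  # 5.37037, as above
> ## End(Not run)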
> 
> x <- matrix(rnorm(100*3), ncol = 3)
> stopifnot(mahalanobis(x, 0, diag(ncol(x))) == rowSums(x*x))
>         ##- Here, D^2 = usual squared Euclidean distances
> 
> Sx <- cov(x)
> D2 <- mahalanobis(x, colMeans(x), Sx)
> plot(density(D2, bw = 0.5),
+      main="Squared Mahalanobis distances, n=100, p=3") ; rug(D2)
> qqplot(qchisq(ppoints(100), df = 3), D2,
+        main = expression("Q-Q plot of Mahalanobis" * ~D^2 *
+                          " vs. quantiles of" * ~ chi[3]^2))
> abline(0, 1, col = 'gray')
> 
> 
> 
> cleanEx()
> nameEx("make.link")
> ### * make.link
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: make.link
> ### Title: Create a Link for GLM Families
> ### Aliases: make.link
> ### Keywords: models
> 
> ### ** Examples
> 
> utils::str(make.link("logit"))
List of 5
 $ linkfun :function (mu)  
 $ linkinv :function (eta)  
 $ mu.eta  :function (eta)  
 $ valideta:function (eta)  
 $ name    : chr "logit"
 - attr(*, "class")= chr "link-glm"
> 
> 
> 
> cleanEx()
> nameEx("makepredictcall")
> ### * makepredictcall
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: makepredictcall
> ### Title: Utility Function for Safe Prediction
> ### Aliases: makepredictcall makepredictcall.default SafePrediction
> ### Keywords: models
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## using poly: this did not work in R < 1.5.0
> fm <- lm(weight ~ poly(height, 2), data = women)
> plot(women, xlab = "Height (in)", ylab = "Weight (lb)")
> ht <- seq(57, 73, length.out = 200)
> nD <- data.frame(height = ht)
> pfm <- predict(fm, nD)
> lines(ht, pfm)
> pf2 <- predict(update(fm, ~ stats::poly(height, 2)), nD)
> stopifnot(all.equal(pfm, pf2)) ## was off (rel.diff. 0.0766) in R <= 3.5.0
> 
> ## see also example(cars)
> 
> ## see bs and ns for spline examples.
> 
> 
> 
> cleanEx()
> nameEx("manova")
> ### * manova
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: manova
> ### Title: Multivariate Analysis of Variance
> ### Aliases: manova
> ### Keywords: models
> 
> ### ** Examples
> 
> ## Set orthogonal contrasts.
> op <- options(contrasts = c("contr.helmert", "contr.poly"))
> 
> ## Fake a 2nd response variable
> npk2 <- within(npk, foo <- rnorm(24))
> ( npk2.aov <- manova(cbind(yield, foo) ~ block + N*P*K, npk2) )
Call:
   manova(cbind(yield, foo) ~ block + N * P * K, npk2)

Terms:
                   block        N        P        K      N:P      N:K      P:K
yield           343.2950 189.2817   8.4017  95.2017  21.2817  33.1350   0.4817
foo               2.9548   0.0223   1.3807   0.0097   2.7585   0.9724   5.5258
Deg. of Freedom        5        1        1        1        1        1        1
                Residuals
yield            185.2867
foo                7.8286
Deg. of Freedom        12

Residual standard errors: 3.929447 0.8077039
1 out of 13 effects not estimable
Estimated effects are balanced
> summary(npk2.aov)
          Df  Pillai approx F num Df den Df  Pr(>F)  
block      5 0.89478   1.9430     10     24 0.08861 .
N          1 0.50586   5.6304      2     11 0.02071 *
P          1 0.17088   1.1336      2     11 0.35677  
K          1 0.34430   2.8879      2     11 0.09815 .
N:P        1 0.30158   2.3750      2     11 0.13888  
N:K        1 0.21654   1.5201      2     11 0.26127  
P:K        1 0.41992   3.9814      2     11 0.05003 .
Residuals 12                                         
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> ( npk2.aovE <- manova(cbind(yield, foo) ~  N*P*K + Error(block), npk2) )

Call:
manova(cbind(yield, foo) ~ N * P * K + Error(block), npk2)

Grand Means:
     yield        foo 
54.8750000  0.1498669 

Stratum 1: block

Terms:
                    N:P:K Residuals
yield            37.00167 306.29333
foo               0.06988   2.88496
Deg. of Freedom         1         4

Residual standard errors: 8.750619 0.8492579
Estimated effects are balanced

Stratum 2: Within

Terms:
                        N         P         K       N:P       N:K       P:K
yield           189.28167   8.40167  95.20167  21.28167  33.13500   0.48167
foo               0.02235   1.38066   0.00966   2.75852   0.97240   5.52584
Deg. of Freedom         1         1         1         1         1         1
                Residuals
yield           185.28667
foo               7.82863
Deg. of Freedom        12

Residual standard errors: 3.929447 0.8077039
Estimated effects are balanced
> summary(npk2.aovE)

Error: block
          Df  Pillai approx F num Df den Df Pr(>F)
N:P:K      1 0.20004   0.3751      2      3 0.7155
Residuals  4                                      

Error: Within
          Df  Pillai approx F num Df den Df  Pr(>F)  
N          1 0.50586   5.6304      2     11 0.02071 *
P          1 0.17088   1.1336      2     11 0.35677  
K          1 0.34430   2.8879      2     11 0.09815 .
N:P        1 0.30158   2.3750      2     11 0.13888  
N:K        1 0.21654   1.5201      2     11 0.26127  
P:K        1 0.41992   3.9814      2     11 0.05003 .
Residuals 12                                         
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> 
> 
> 
> base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly"))
> cleanEx()
> nameEx("mantelhaen.test")
> ### * mantelhaen.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: mantelhaen.test
> ### Title: Cochran-Mantel-Haenszel Chi-Squared Test for Count Data
> ### Aliases: mantelhaen.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Agresti (1990), pages 231--237, Penicillin and Rabbits
> ## Investigation of the effectiveness of immediately injected or 1.5
> ##  hours delayed penicillin in protecting rabbits against a lethal
> ##  injection with beta-hemolytic streptococci.
> Rabbits <-
+ array(c(0, 0, 6, 5,
+         3, 0, 3, 6,
+         6, 2, 0, 4,
+         5, 6, 1, 0,
+         2, 5, 0, 0),
+       dim = c(2, 2, 5),
+       dimnames = list(
+           Delay = c("None", "1.5h"),
+           Response = c("Cured", "Died"),
+           Penicillin.Level = c("1/8", "1/4", "1/2", "1", "4")))
> Rabbits
, , Penicillin.Level = 1/8

      Response
Delay  Cured Died
  None     0    6
  1.5h     0    5

, , Penicillin.Level = 1/4

      Response
Delay  Cured Died
  None     3    3
  1.5h     0    6

, , Penicillin.Level = 1/2

      Response
Delay  Cured Died
  None     6    0
  1.5h     2    4

, , Penicillin.Level = 1

      Response
Delay  Cured Died
  None     5    1
  1.5h     6    0

, , Penicillin.Level = 4

      Response
Delay  Cured Died
  None     2    0
  1.5h     5    0

> ## Classical Mantel-Haenszel test
> mantelhaen.test(Rabbits)

	Mantel-Haenszel chi-squared test with continuity correction

data:  Rabbits
Mantel-Haenszel X-squared = 3.9286, df = 1, p-value = 0.04747
alternative hypothesis: true common odds ratio is not equal to 1
95 percent confidence interval:
  1.026713 47.725133
sample estimates:
common odds ratio 
                7 

> ## => p = 0.047, some evidence for higher cure rate of immediate
> ##               injection
> ## Exact conditional test
> mantelhaen.test(Rabbits, exact = TRUE)

	Exact conditional test of independence in 2 x 2 x k tables

data:  Rabbits
S = 16, p-value = 0.03994
alternative hypothesis: true common odds ratio is not equal to 1
95 percent confidence interval:
   1.077401 529.837399
sample estimates:
common odds ratio 
         10.36102 

> ## => p = 0.040
> ## Exact conditional test for one-sided alternative of a higher
> ## cure rate for immediate injection
> mantelhaen.test(Rabbits, exact = TRUE, alternative = "greater")

	Exact conditional test of independence in 2 x 2 x k tables

data:  Rabbits
S = 16, p-value = 0.01997
alternative hypothesis: true common odds ratio is greater than 1
95 percent confidence interval:
 1.384239      Inf
sample estimates:
common odds ratio 
         10.36102 

> ## => p = 0.020
> 
> ## UC Berkeley Student Admissions
> mantelhaen.test(UCBAdmissions)

	Mantel-Haenszel chi-squared test with continuity correction

data:  UCBAdmissions
Mantel-Haenszel X-squared = 1.4269, df = 1, p-value = 0.2323
alternative hypothesis: true common odds ratio is not equal to 1
95 percent confidence interval:
 0.7719074 1.0603298
sample estimates:
common odds ratio 
        0.9046968 

> ## No evidence for association between admission and gender
> ## when adjusted for department.  However,
> apply(UCBAdmissions, 3, function(x) (x[1,1]*x[2,2])/(x[1,2]*x[2,1]))
        A         B         C         D         E         F 
0.3492120 0.8025007 1.1330596 0.9212838 1.2216312 0.8278727 
> ## This suggests that the assumption of homogeneous (conditional)
> ## odds ratios may be violated.  The traditional approach would be
> ## to use the Woolf test for interaction:
> woolf <- function(x) {
+   x <- x + 1 / 2
+   k <- dim(x)[3]
+   or <- apply(x, 3, function(x) (x[1,1]*x[2,2])/(x[1,2]*x[2,1]))
+   w <-  apply(x, 3, function(x) 1 / sum(1 / x))
+   1 - pchisq(sum(w * (log(or) - weighted.mean(log(or), w)) ^ 2), k - 1)
+ }
> woolf(UCBAdmissions)
[1] 0.0034272
> ## => p = 0.003, indicating that there is significant heterogeneity.
> ## (And hence the Mantel-Haenszel test cannot be used.)
> 
> ## Agresti (2002), p. 287f and p. 297.
> ## Job Satisfaction example.
> Satisfaction <-
+     as.table(array(c(1, 2, 0, 0, 3, 3, 1, 2,
+                      11, 17, 8, 4, 2, 3, 5, 2,
+                      1, 0, 0, 0, 1, 3, 0, 1,
+                      2, 5, 7, 9, 1, 1, 3, 6),
+                    dim = c(4, 4, 2),
+                    dimnames =
+                    list(Income =
+                         c("<5000", "5000-15000",
+                           "15000-25000", ">25000"),
+                         "Job Satisfaction" =
+                         c("V_D", "L_S", "M_S", "V_S"),
+                         Gender = c("Female", "Male"))))
> ## (Satisfaction categories abbreviated for convenience.)
> ftable(. ~ Gender + Income, Satisfaction)
                   Job Satisfaction V_D L_S M_S V_S
Gender Income                                      
Female <5000                          1   3  11   2
       5000-15000                     2   3  17   3
       15000-25000                    0   1   8   5
       >25000                         0   2   4   2
Male   <5000                          1   1   2   1
       5000-15000                     0   3   5   1
       15000-25000                    0   0   7   3
       >25000                         0   1   9   6
> ## Table 7.8 in Agresti (2002), p. 288.
> mantelhaen.test(Satisfaction)

	Cochran-Mantel-Haenszel test

data:  Satisfaction
Cochran-Mantel-Haenszel M^2 = 10.2, df = 9, p-value = 0.3345

> ## See Table 7.12 in Agresti (2002), p. 297.
> 
> 
> 
> cleanEx()
> nameEx("mauchly.test")
> ### * mauchly.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: mauchly.test
> ### Title: Mauchly's Test of Sphericity
> ### Aliases: mauchly.test mauchly.test.SSD mauchly.test.mlm
> ### Keywords: htest models multivariate
> 
> ### ** Examples
> 
> utils::example(SSD) # Brings in the mlmfit and reacttime objects

SSD> # Lifted from Baron+Li:
SSD> # "Notes on the use of R for psychology experiments and questionnaires"
SSD> # Maxwell and Delaney, p. 497
SSD> reacttime <- matrix(c(
SSD+ 420, 420, 480, 480, 600, 780,
SSD+ 420, 480, 480, 360, 480, 600,
SSD+ 480, 480, 540, 660, 780, 780,
SSD+ 420, 540, 540, 480, 780, 900,
SSD+ 540, 660, 540, 480, 660, 720,
SSD+ 360, 420, 360, 360, 480, 540,
SSD+ 480, 480, 600, 540, 720, 840,
SSD+ 480, 600, 660, 540, 720, 900,
SSD+ 540, 600, 540, 480, 720, 780,
SSD+ 480, 420, 540, 540, 660, 780),
SSD+ ncol = 6, byrow = TRUE,
SSD+ dimnames = list(subj = 1:10,
SSD+               cond = c("deg0NA", "deg4NA", "deg8NA",
SSD+                        "deg0NP", "deg4NP", "deg8NP")))

SSD> mlmfit <- lm(reacttime ~ 1)

SSD> SSD(mlmfit)
$SSD
        cond
cond     deg0NA deg4NA deg8NA deg0NP deg4NP deg8NP
  deg0NA  29160  30600  26640  23760  32400  25560
  deg4NA  30600  66600  32400   7200  36000  30600
  deg8NA  26640  32400  56160  41040  57600  69840
  deg0NP  23760   7200  41040  70560  72000  63360
  deg4NP  32400  36000  57600  72000 108000 100800
  deg8NP  25560  30600  69840  63360 100800 122760

$call
lm(formula = reacttime ~ 1)

$df
[1] 9

attr(,"class")
[1] "SSD"

SSD> estVar(mlmfit)
        cond
cond     deg0NA deg4NA deg8NA deg0NP deg4NP deg8NP
  deg0NA   3240   3400   2960   2640   3600   2840
  deg4NA   3400   7400   3600    800   4000   3400
  deg8NA   2960   3600   6240   4560   6400   7760
  deg0NP   2640    800   4560   7840   8000   7040
  deg4NP   3600   4000   6400   8000  12000  11200
  deg8NP   2840   3400   7760   7040  11200  13640
> 
> ### traditional test of intrasubj. contrasts
> mauchly.test(mlmfit, X = ~1)

	Mauchly's test of sphericity
	Contrasts orthogonal to
	~1


data:  SSD matrix from lm(formula = reacttime ~ 1)
W = 0.031084, p-value = 0.04765

> 
> ### tests using intra-subject 3x2 design
> idata <- data.frame(deg = gl(3, 1, 6, labels = c(0,4,8)),
+                     noise = gl(2, 3, 6, labels = c("A","P")))
> mauchly.test(mlmfit, X = ~ deg + noise, idata = idata)

	Mauchly's test of sphericity
	Contrasts orthogonal to
	~deg + noise


data:  SSD matrix from lm(formula = reacttime ~ 1)
W = 0.89378, p-value = 0.6381

> mauchly.test(mlmfit, M = ~ deg + noise, X = ~ noise, idata = idata)

	Mauchly's test of sphericity
	Contrasts orthogonal to
	~noise

	Contrasts spanned by
	~deg + noise


data:  SSD matrix from lm(formula = reacttime ~ 1)
W = 0.96011, p-value = 0.8497

> 
> 
> 
> cleanEx()
> nameEx("mcnemar.test")
> ### * mcnemar.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: mcnemar.test
> ### Title: McNemar's Chi-squared Test for Count Data
> ### Aliases: mcnemar.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Agresti (1990), p. 350.
> ## Presidential Approval Ratings.
> ##  Approval of the President's performance in office in two surveys,
> ##  one month apart, for a random sample of 1600 voting-age Americans.
> Performance <-
+ matrix(c(794, 86, 150, 570),
+        nrow = 2,
+        dimnames = list("1st Survey" = c("Approve", "Disapprove"),
+                        "2nd Survey" = c("Approve", "Disapprove")))
> Performance
            2nd Survey
1st Survey   Approve Disapprove
  Approve        794        150
  Disapprove      86        570
> mcnemar.test(Performance)

	McNemar's Chi-squared test with continuity correction

data:  Performance
McNemar's chi-squared = 16.818, df = 1, p-value = 4.115e-05

> ## => significant change (in fact, drop) in approval ratings
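> ## The continuity-corrected statistic uses only the discordant cells,
> ## (|b - c| - 1)^2 / (b + c); computed by hand (a sketch, not run here):
> ## Not run: 
> ##D b <- Performance[1, 2]; c. <- Performance[2, 1]
> ##D (abs(b - c.) - 1)^2 / (b + c.)  # 16.818, as above
> ## End(Not run)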
> 
> 
> 
> cleanEx()
> nameEx("median")
> ### * median
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: median
> ### Title: Median Value
> ### Aliases: median median.default
> ### Keywords: univar robust
> 
> ### ** Examples
> 
> median(1:4)                # = 2.5 [even number]
[1] 2.5
> median(c(1:3, 100, 1000))  # = 3 [odd, robust]
[1] 3
> 
> 
> 
> cleanEx()
> nameEx("medpolish")
> ### * medpolish
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: medpolish
> ### Title: Median Polish (Robust Two-way Decomposition) of a Matrix
> ### Aliases: medpolish
> ### Keywords: robust
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## Deaths from sport parachuting;  from ABC of EDA, p.224:
> deaths <-
+     rbind(c(14,15,14),
+           c( 7, 4, 7),
+           c( 8, 2,10),
+           c(15, 9,10),
+           c( 0, 2, 0))
> dimnames(deaths) <- list(c("1-24", "25-74", "75-199", "200++", "NA"),
+                          paste(1973:1975))
> deaths
       1973 1974 1975
1-24     14   15   14
25-74     7    4    7
75-199    8    2   10
200++    15    9   10
NA        0    2    0
> (med.d <- medpolish(deaths))
1: 19
Final: 19

Median Polish Results (Dataset: "deaths")

Overall: 8

Row Effects:
  1-24  25-74 75-199  200++     NA 
     6     -1      0      2     -8 

Column Effects:
1973 1974 1975 
   0   -1    0 

Residuals:
       1973 1974 1975
1-24      0    2    0
25-74     0   -2    0
75-199    0   -5    2
200++     5    0    0
NA        0    3    0

> plot(med.d)
> ## Check decomposition:
> all(deaths ==
+     med.d$overall + outer(med.d$row,med.d$col, `+`) + med.d$residuals)
[1] TRUE
> 
> 
> 
> cleanEx()
> nameEx("model.extract")
> ### * model.extract
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: model.extract
> ### Title: Extract Components from a Model Frame
> ### Aliases: model.extract model.offset model.response model.weights
> ### Keywords: manip programming models
> 
> ### ** Examples
> 
> a <- model.frame(cbind(ncases,ncontrols) ~ agegp + tobgp + alcgp, data = esoph)
> model.extract(a, "response")
   ncases ncontrols
1       0        40
2       0        10
3       0         6
4       0         5
5       0        27
6       0         7
7       0         4
8       0         7
9       0         2
10      0         1
11      0         2
12      0         1
13      1         0
14      0         1
15      0         2
16      0        60
17      1        13
18      0         7
19      0         8
20      0        35
21      3        20
22      1        13
23      0         8
24      0        11
25      0         6
26      0         2
27      0         1
28      2         1
29      0         3
30      2         2
31      1        45
32      0        18
33      0        10
34      0         4
35      6        32
36      4        17
37      5        10
38      5         2
39      3        13
40      6         8
41      1         4
42      2         2
43      4         0
44      3         1
45      2         1
46      4         0
47      2        47
48      3        19
49      3         9
50      4         2
51      9        31
52      6        15
53      4        13
54      3         3
55      9         9
56      8         7
57      3         3
58      4         0
59      5         5
60      6         1
61      2         1
62      5         1
63      5        43
64      4        10
65      2         5
66      0         2
67     17        17
68      3         7
69      5         4
70      6         7
71      4         8
72      2         1
73      1         0
74      3         1
75      1         1
76      1         0
77      1         0
78      1        17
79      2         4
80      1         2
81      2         3
82      1         2
83      0         3
84      1         0
85      1         0
86      1         0
87      2         0
88      1         0
> stopifnot(model.extract(a, "response") == model.response(a))
> 
> a <- model.frame(ncases/(ncases+ncontrols) ~ agegp + tobgp + alcgp,
+                  data = esoph, weights = ncases+ncontrols)
> model.response(a)
         1          2          3          4          5          6          7 
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 
         8          9         10         11         12         13         14 
0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 1.00000000 0.00000000 
        15         16         17         18         19         20         21 
0.00000000 0.00000000 0.07142857 0.00000000 0.00000000 0.00000000 0.13043478 
        22         23         24         25         26         27         28 
0.07142857 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000 0.66666667 
        29         30         31         32         33         34         35 
0.00000000 0.50000000 0.02173913 0.00000000 0.00000000 0.00000000 0.15789474 
        36         37         38         39         40         41         42 
0.19047619 0.33333333 0.71428571 0.18750000 0.42857143 0.20000000 0.50000000 
        43         44         45         46         47         48         49 
1.00000000 0.75000000 0.66666667 1.00000000 0.04081633 0.13636364 0.25000000 
        50         51         52         53         54         55         56 
0.66666667 0.22500000 0.28571429 0.23529412 0.50000000 0.50000000 0.53333333 
        57         58         59         60         61         62         63 
0.50000000 1.00000000 0.50000000 0.85714286 0.66666667 0.83333333 0.10416667 
        64         65         66         67         68         69         70 
0.28571429 0.28571429 0.00000000 0.50000000 0.30000000 0.55555556 0.46153846 
        71         72         73         74         75         76         77 
0.33333333 0.66666667 1.00000000 0.75000000 0.50000000 1.00000000 1.00000000 
        78         79         80         81         82         83         84 
0.05555556 0.33333333 0.33333333 0.40000000 0.33333333 0.00000000 1.00000000 
        85         86         87         88 
1.00000000 1.00000000 1.00000000 1.00000000 
> (mw <- model.extract(a, "weights"))
 1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 
40 10  6  5 27  7  4  7  2  1  2  1  1  1  2 60 14  7  8 35 23 14  8 11  6  2 
27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 
 1  3  3  4 46 18 10  4 38 21 15  7 16 14  5  4  4  4  3  4 49 22 12  6 40 21 
53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 
17  6 18 15  6  4 10  7  3  6 48 14  7  2 34 10  9 13 12  3  1  4  2  1  1 18 
79 80 81 82 83 84 85 86 87 88 
 6  3  5  3  3  1  1  1  2  1 
> stopifnot(identical(unname(mw), model.weights(a)))
> 
> a <- model.frame(cbind(ncases,ncontrols) ~ agegp,
+                  something = tobgp, data = esoph)
> names(a)
[1] "cbind(ncases, ncontrols)" "agegp"                   
[3] "(something)"             
> stopifnot(model.extract(a, "something") == esoph$tobgp)
> 
> 
> 
> cleanEx()
> nameEx("model.frame")
> ### * model.frame
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: model.frame
> ### Title: Extracting the Model Frame from a Formula or Fit
> ### Aliases: model.frame model.frame.default model.frame.lm model.frame.glm
> ###   model.frame.aovlist get_all_vars
> ### Keywords: models
> 
> ### ** Examples
> 
> data.class(model.frame(dist ~ speed, data = cars))
[1] "data.frame"
> 
> ## using a subset and an extra variable
> model.frame(dist ~ speed, data = cars, subset = speed < 10, z = log(dist))
  dist speed       (z)
1    2     4 0.6931472
2   10     4 2.3025851
3    4     7 1.3862944
4   22     7 3.0910425
5   16     8 2.7725887
6   10     9 2.3025851
> 
> ## get_all_vars(): new variables are recycled (iff length matches: 50 = 2*25)
> ncars <- get_all_vars(sqrt(dist) ~ I(speed/2), data = cars, newVar = 2:3)
> stopifnot(is.data.frame(ncars),
+           identical(cars, ncars[,names(cars)]),
+           ncol(ncars) == ncol(cars) + 1)
> 
> 
> 
> cleanEx()
> nameEx("model.matrix")
> ### * model.matrix
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: model.matrix
> ### Title: Construct Design Matrices
> ### Aliases: model.matrix model.matrix.default model.matrix.lm
> ### Keywords: models
> 
> ### ** Examples
> 
> ff <- log(Volume) ~ log(Height) + log(Girth)
> utils::str(m <- model.frame(ff, trees))
'data.frame':	31 obs. of  3 variables:
 $ log(Volume): num  2.33 2.33 2.32 2.8 2.93 ...
 $ log(Height): num  4.25 4.17 4.14 4.28 4.39 ...
 $ log(Girth) : num  2.12 2.15 2.17 2.35 2.37 ...
 - attr(*, "terms")=Classes 'terms', 'formula'  language log(Volume) ~ log(Height) + log(Girth)
  .. ..- attr(*, "variables")= language list(log(Volume), log(Height), log(Girth))
  .. ..- attr(*, "factors")= int [1:3, 1:2] 0 1 0 0 0 1
  .. .. ..- attr(*, "dimnames")=List of 2
  .. .. .. ..$ : chr [1:3] "log(Volume)" "log(Height)" "log(Girth)"
  .. .. .. ..$ : chr [1:2] "log(Height)" "log(Girth)"
  .. ..- attr(*, "term.labels")= chr [1:2] "log(Height)" "log(Girth)"
  .. ..- attr(*, "order")= int [1:2] 1 1
  .. ..- attr(*, "intercept")= int 1
  .. ..- attr(*, "response")= int 1
  .. ..- attr(*, ".Environment")=<environment: R_GlobalEnv> 
  .. ..- attr(*, "predvars")= language list(log(Volume), log(Height), log(Girth))
  .. ..- attr(*, "dataClasses")= Named chr [1:3] "numeric" "numeric" "numeric"
  .. .. ..- attr(*, "names")= chr [1:3] "log(Volume)" "log(Height)" "log(Girth)"
> mat <- model.matrix(ff, m)
> 
> dd <- data.frame(a = gl(3,4), b = gl(4,1,12)) # balanced 2-way
> options("contrasts") # typically 'treatment' (for unordered factors)
$contrasts
        unordered           ordered 
"contr.treatment"      "contr.poly" 

> model.matrix(~ a + b, dd)
   (Intercept) a2 a3 b2 b3 b4
1            1  0  0  0  0  0
2            1  0  0  1  0  0
3            1  0  0  0  1  0
4            1  0  0  0  0  1
5            1  1  0  0  0  0
6            1  1  0  1  0  0
7            1  1  0  0  1  0
8            1  1  0  0  0  1
9            1  0  1  0  0  0
10           1  0  1  1  0  0
11           1  0  1  0  1  0
12           1  0  1  0  0  1
attr(,"assign")
[1] 0 1 1 2 2 2
attr(,"contrasts")
attr(,"contrasts")$a
[1] "contr.treatment"

attr(,"contrasts")$b
[1] "contr.treatment"

> model.matrix(~ a + b, dd, contrasts.arg = list(a = "contr.sum"))
   (Intercept) a1 a2 b2 b3 b4
1            1  1  0  0  0  0
2            1  1  0  1  0  0
3            1  1  0  0  1  0
4            1  1  0  0  0  1
5            1  0  1  0  0  0
6            1  0  1  1  0  0
7            1  0  1  0  1  0
8            1  0  1  0  0  1
9            1 -1 -1  0  0  0
10           1 -1 -1  1  0  0
11           1 -1 -1  0  1  0
12           1 -1 -1  0  0  1
attr(,"assign")
[1] 0 1 1 2 2 2
attr(,"contrasts")
attr(,"contrasts")$a
[1] "contr.sum"

attr(,"contrasts")$b
[1] "contr.treatment"

> model.matrix(~ a + b, dd, contrasts.arg = list(a = "contr.sum", b = contr.poly))
   (Intercept) a1 a2        b.L  b.Q        b.C
1            1  1  0 -0.6708204  0.5 -0.2236068
2            1  1  0 -0.2236068 -0.5  0.6708204
3            1  1  0  0.2236068 -0.5 -0.6708204
4            1  1  0  0.6708204  0.5  0.2236068
5            1  0  1 -0.6708204  0.5 -0.2236068
6            1  0  1 -0.2236068 -0.5  0.6708204
7            1  0  1  0.2236068 -0.5 -0.6708204
8            1  0  1  0.6708204  0.5  0.2236068
9            1 -1 -1 -0.6708204  0.5 -0.2236068
10           1 -1 -1 -0.2236068 -0.5  0.6708204
11           1 -1 -1  0.2236068 -0.5 -0.6708204
12           1 -1 -1  0.6708204  0.5  0.2236068
attr(,"assign")
[1] 0 1 1 2 2 2
attr(,"contrasts")
attr(,"contrasts")$a
[1] "contr.sum"

attr(,"contrasts")$b
          .L   .Q         .C
1 -0.6708204  0.5 -0.2236068
2 -0.2236068 -0.5  0.6708204
3  0.2236068 -0.5 -0.6708204
4  0.6708204  0.5  0.2236068

> m.orth <- model.matrix(~a+b, dd, contrasts.arg = list(a = "contr.helmert"))
> crossprod(m.orth) # m.orth is  ALMOST  orthogonal
            (Intercept) a1 a2 b2 b3 b4
(Intercept)          12  0  0  3  3  3
a1                    0  8  0  0  0  0
a2                    0  0 24  0  0  0
b2                    3  0  0  3  0  0
b3                    3  0  0  0  3  0
b4                    3  0  0  0  0  3
> # an invalid contrasts argument is ignored with a warning:
> stopifnot(identical(
+    model.matrix(~ a + b, dd),
+    model.matrix(~ a + b, dd, contrasts.arg = "contr.FOO")))
Warning in model.matrix.default(~a + b, dd, contrasts.arg = "contr.FOO") :
  non-list contrasts argument ignored
> 
> 
> 
> cleanEx()
> nameEx("model.tables")
> ### * model.tables
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: model.tables
> ### Title: Compute Tables of Results from an 'aov' Model Fit
> ### Aliases: model.tables model.tables.aov model.tables.aovlist
> ### Keywords: models
> 
> ### ** Examples
> 
> 
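> ## The shipped example is not run here; a minimal sketch of typical
> ## usage, assuming an aov() fit to the 'npk' data:
> ## Not run: 
> ##D npk.aov <- aov(yield ~ block + N*P*K, data = npk)
> ##D model.tables(npk.aov, type = "means")
> ##D model.tables(npk.aov, type = "effects", se = TRUE)
> ## End(Not run)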
> cleanEx()
> nameEx("monthplot")
> ### * monthplot
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: monthplot
> ### Title: Plot a Seasonal or other Subseries from a Time Series
> ### Aliases: monthplot monthplot.default monthplot.ts monthplot.stl
> ###   monthplot.StructTS
> ### Keywords: hplot ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## The CO2 data
> fit <- stl(log(co2), s.window = 20, t.window = 20)
> plot(fit)
> op <- par(mfrow = c(2,2))
> monthplot(co2, ylab = "data", cex.axis = 0.8)
> monthplot(fit, choice = "seasonal", cex.axis = 0.8)
> monthplot(fit, choice = "trend", cex.axis = 0.8)
> monthplot(fit, choice = "remainder", type = "h", cex.axis = 0.8)
> par(op)
> 
> ## The CO2 data, grouped quarterly
> quarter <- (cycle(co2) - 1) %/% 3
> monthplot(co2, phase = quarter)
> 
> ## see also JohnsonJohnson
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("mood.test")
> ### * mood.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: mood.test
> ### Title: Mood Two-Sample Test of Scale
> ### Aliases: mood.test mood.test.default mood.test.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Same data as for the Ansari-Bradley test:
> ## Serum iron determination using Hyland control sera
> ramsay <- c(111, 107, 100, 99, 102, 106, 109, 108, 104, 99,
+             101, 96, 97, 102, 107, 113, 116, 113, 110, 98)
> jung.parekh <- c(107, 108, 106, 98, 105, 103, 110, 105, 104,
+             100, 96, 108, 103, 104, 114, 114, 113, 108, 106, 99)
> mood.test(ramsay, jung.parekh)

	Mood two-sample test of scale

data:  ramsay and jung.parekh
Z = 1.0371, p-value = 0.2997
alternative hypothesis: two.sided

> ## Compare this to ansari.test(ramsay, jung.parekh)
> 
> 
> 
> cleanEx()
> nameEx("na.action")
> ### * na.action
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: na.action
> ### Title: NA Action
> ### Aliases: na.action na.action.default
> ### Keywords: NA methods
> 
> ### ** Examples
> 
> na.action(na.omit(c(1, NA)))
[1] 2
attr(,"class")
[1] "omit"
> 
> 
> 
> cleanEx()
> nameEx("na.contiguous")
> ### * na.contiguous
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: na.contiguous
> ### Title: Find Longest Contiguous Stretch of non-NAs
> ### Aliases: na.contiguous na.contiguous.default
> ### Keywords: ts
> 
> ### ** Examples
> 
> na.contiguous(presidents)
     Qtr1 Qtr2 Qtr3 Qtr4
1952                  32
1953   59   74   75   60
1954   71   61   71   57
1955   71   68   79   73
1956   76   71   67   75
1957   79   62   63   57
1958   60   49   48   52
1959   57   62   61   66
1960   71   62   61   57
1961   72   83   71   78
1962   79   71   62   74
1963   76   64   62   57
1964   80   73   69   69
1965   71   64   69   62
1966   63   46   56   44
1967   44   52   38   46
1968   36   49   35   44
1969   59   65   65   56
1970   66   53   61   52
1971   51   48   54   49
1972   49   61          
> 
> 
> 
> cleanEx()
> nameEx("na.fail")
> ### * na.fail
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: na.fail
> ### Title: Handle Missing Values in Objects
> ### Aliases: na.fail na.fail.default na.omit na.omit.data.frame
> ###   na.omit.default na.exclude na.exclude.data.frame na.exclude.default
> ###   na.pass
> ### Keywords: NA
> 
> ### ** Examples
> 
> DF <- data.frame(x = c(1, 2, 3), y = c(0, 10, NA))
> na.omit(DF)
  x  y
1 1  0
2 2 10
> m <- as.matrix(DF)
> na.omit(m)
     x  y
[1,] 1  0
[2,] 2 10
attr(,"na.action")
[1] 3
attr(,"class")
[1] "omit"
> stopifnot(all(na.omit(1:3) == 1:3))  # does not affect objects with no NA's
> try(na.fail(DF))   #> Error: missing values in ...
Error in na.fail.default(DF) : missing values in object
> 
> options("na.action")
$na.action
[1] "na.omit"

> 
> 
> 
> cleanEx()
> nameEx("nextn")
> ### * nextn
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: nextn
> ### Title: Find Highly Composite Numbers
> ### Aliases: nextn
> ### Keywords: math
> 
> ### ** Examples
> 
> nextn(1001) # 1024
[1] 1024
> table(nextn(599:630))

600 625 640 
  2  25   5 
> n <- 1:100 ; plot(n, nextn(n) - n, type = "o", lwd=2, cex=1/2)
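> ## the set of allowed factors can be restricted (a sketch, not run here):
> ## Not run: 
> ##D nextn(1001, factors = 2)  # smallest power of 2 >= 1001: 1024
> ##D nextn(1001, factors = 3)  # smallest power of 3 >= 1001: 3^7 = 2187
> ## End(Not run)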
> 
> 
> 
> cleanEx()
> nameEx("nlm")
> ### * nlm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: nlm
> ### Title: Non-Linear Minimization
> ### Aliases: nlm
> ### Keywords: nonlinear optimize
> 
> ### ** Examples
> 
> f <- function(x) sum((x-1:length(x))^2)
> nlm(f, c(10,10))
$minimum
[1] 4.303458e-26

$estimate
[1] 1 2

$gradient
[1]  2.757794e-13 -3.099743e-13

$code
[1] 1

$iterations
[1] 2

> nlm(f, c(10,10), print.level = 2)
iteration = 0
Step:
[1] 0 0
Parameter:
[1] 10 10
Function Value
[1] 145
Gradient:
[1] 18.00001 16.00001

iteration = 1
Step:
[1] -9 -8
Parameter:
[1] 1 2
Function Value
[1] 1.721748e-13
Gradient:
[1] 1.551336e-06 1.379735e-06

iteration = 2
Parameter:
[1] 1 2
Function Value
[1] 4.303458e-26
Gradient:
[1]  2.757794e-13 -3.099743e-13

Relative gradient close to zero.
Current iterate is probably solution.

$minimum
[1] 4.303458e-26

$estimate
[1] 1 2

$gradient
[1]  2.757794e-13 -3.099743e-13

$code
[1] 1

$iterations
[1] 2

> utils::str(nlm(f, c(5), hessian = TRUE))
List of 6
 $ minimum   : num 2.44e-24
 $ estimate  : num 1
 $ gradient  : num 1e-06
 $ hessian   : num [1, 1] 2
 $ code      : int 1
 $ iterations: int 1
> 
> f <- function(x, a) sum((x-a)^2)
> nlm(f, c(10,10), a = c(3,5))
$minimum
[1] 3.371781e-25

$estimate
[1] 3 5

$gradient
[1]  6.750156e-13 -9.450218e-13

$code
[1] 1

$iterations
[1] 2

> f <- function(x, a)
+ {
+     res <- sum((x-a)^2)
+     attr(res, "gradient") <- 2*(x-a)
+     res
+ }
> nlm(f, c(10,10), a = c(3,5))
$minimum
[1] 0

$estimate
[1] 3 5

$gradient
[1] 0 0

$code
[1] 1

$iterations
[1] 1

> 
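> ## a "hessian" attribute is used analogously when supplied together
> ## with the gradient; for this quadratic both are exact
> ## (a sketch, not run here):
> ## Not run: 
> ##D f <- function(x, a)
> ##D {
> ##D     res <- sum((x-a)^2)
> ##D     attr(res, "gradient") <- 2*(x-a)
> ##D     attr(res, "hessian")  <- 2*diag(length(x))
> ##D     res
> ##D }
> ##D nlm(f, c(10,10), a = c(3,5))
> ## End(Not run)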
> ## more examples, including the use of derivatives.
> ## Not run: demo(nlm)
> 
> 
> 
> cleanEx()
> nameEx("nlminb")
> ### * nlminb
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: nlminb
> ### Title: Optimization using PORT routines
> ### Aliases: nlminb
> ### Keywords: optimize
> 
> ### ** Examples
> 
> 
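> ## The shipped example is not run here; a minimal sketch of a
> ## box-constrained minimisation (the Rosenbrock function is an
> ## illustration chosen here, not part of the original example):
> ## Not run: 
> ##D fr <- function(x) 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2
> ##D nlminb(c(-1.2, 1), fr, lower = -2, upper = 2)
> ## End(Not run)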
> cleanEx()
> nameEx("nls")
> ### * nls
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: nls
> ### Title: Nonlinear Least Squares
> ### Aliases: nls
> ### Keywords: nonlinear regression models
> 
> ### ** Examples
> 
> ## Don't show: 
> od <- options(digits=5)
> ## End(Don't show)
> require(graphics)
> 
> DNase1 <- subset(DNase, Run == 1)
> 
> ## using a selfStart model
> fm1DNase1 <- nls(density ~ SSlogis(log(conc), Asym, xmid, scal), DNase1)
> summary(fm1DNase1)

Formula: density ~ SSlogis(log(conc), Asym, xmid, scal)

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
Asym   2.3452     0.0782    30.0  2.2e-13 ***
xmid   1.4831     0.0814    18.2  1.2e-10 ***
scal   1.0415     0.0323    32.3  8.5e-14 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.0192 on 13 degrees of freedom

> ## the coefficients only:
> coef(fm1DNase1)
  Asym   xmid   scal 
2.3452 1.4831 1.0415 
> ## including their SE, etc:
> coef(summary(fm1DNase1))
     Estimate Std. Error t value   Pr(>|t|)
Asym   2.3452   0.078154  30.007 2.1655e-13
xmid   1.4831   0.081353  18.230 1.2185e-10
scal   1.0415   0.032271  32.272 8.5069e-14
> 
> ## using conditional linearity
> fm2DNase1 <- nls(density ~ 1/(1 + exp((xmid - log(conc))/scal)),
+                  data = DNase1,
+                  start = list(xmid = 0, scal = 1),
+                  algorithm = "plinear")
> summary(fm2DNase1)

Formula: density ~ 1/(1 + exp((xmid - log(conc))/scal))

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
xmid   1.4831     0.0814    18.2  1.2e-10 ***
scal   1.0415     0.0323    32.3  8.5e-14 ***
.lin   2.3452     0.0782    30.0  2.2e-13 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.0192 on 13 degrees of freedom

> 
> ## without conditional linearity
> fm3DNase1 <- nls(density ~ Asym/(1 + exp((xmid - log(conc))/scal)),
+                  data = DNase1,
+                  start = list(Asym = 3, xmid = 0, scal = 1))
> summary(fm3DNase1)

Formula: density ~ Asym/(1 + exp((xmid - log(conc))/scal))

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
Asym   2.3452     0.0782    30.0  2.2e-13 ***
xmid   1.4831     0.0814    18.2  1.2e-10 ***
scal   1.0415     0.0323    32.3  8.5e-14 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.0192 on 13 degrees of freedom

> 
> ## using Port's nl2sol algorithm
> fm4DNase1 <- nls(density ~ Asym/(1 + exp((xmid - log(conc))/scal)),
+                  data = DNase1,
+                  start = list(Asym = 3, xmid = 0, scal = 1),
+                  algorithm = "port")
> summary(fm4DNase1)

Formula: density ~ Asym/(1 + exp((xmid - log(conc))/scal))

Parameters:
     Estimate Std. Error t value Pr(>|t|)    
Asym   2.3452     0.0782    30.0  2.2e-13 ***
xmid   1.4831     0.0814    18.2  1.2e-10 ***
scal   1.0415     0.0323    32.3  8.5e-14 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.0192 on 13 degrees of freedom

Algorithm "port", convergence message: relative convergence (4)

> 
> ## weighted nonlinear regression
> Treated <- Puromycin[Puromycin$state == "treated", ]
> weighted.MM <- function(resp, conc, Vm, K)
+ {
+     ## Purpose: exactly as white book p. 451 -- RHS for nls()
+     ##  Weighted version of Michaelis-Menten model
+     ## ----------------------------------------------------------
+     ## Arguments: 'y', 'x' and the two parameters (see book)
+     ## ----------------------------------------------------------
+     ## Author: Martin Maechler, Date: 23 Mar 2001
+ 
+     pred <- (Vm * conc)/(K + conc)
+     (resp - pred) / sqrt(pred)
+ }
> 
> Pur.wt <- nls( ~ weighted.MM(rate, conc, Vm, K), data = Treated,
+               start = list(Vm = 200, K = 0.1))
> summary(Pur.wt)

Formula: 0 ~ weighted.MM(rate, conc, Vm, K)

Parameters:
   Estimate Std. Error t value Pr(>|t|)    
Vm 2.07e+02   9.22e+00   22.42  7.0e-10 ***
K  5.46e-02   7.98e-03    6.84  4.5e-05 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 1.21 on 10 degrees of freedom

> 
> ## Passing arguments using a list that cannot be coerced to a data.frame
> lisTreat <- with(Treated,
+                  list(conc1 = conc[1], conc.1 = conc[-1], rate = rate))
> 
> weighted.MM1 <- function(resp, conc1, conc.1, Vm, K)
+ {
+      conc <- c(conc1, conc.1)
+      pred <- (Vm * conc)/(K + conc)
+     (resp - pred) / sqrt(pred)
+ }
> Pur.wt1 <- nls( ~ weighted.MM1(rate, conc1, conc.1, Vm, K),
+                data = lisTreat, start = list(Vm = 200, K = 0.1))
> stopifnot(all.equal(coef(Pur.wt), coef(Pur.wt1)))
> 
> ## Chambers and Hastie (1992) Statistical Models in S  (p. 537):
> ## If the value of the right side [of formula] has an attribute called
> ## 'gradient' this should be a matrix with the number of rows equal
> ## to the length of the response and one column for each parameter.
> 
> weighted.MM.grad <- function(resp, conc1, conc.1, Vm, K)
+ {
+   conc <- c(conc1, conc.1)
+ 
+   K.conc <- K+conc
+   dy.dV <- conc/K.conc
+   dy.dK <- -Vm*dy.dV/K.conc
+   pred <- Vm*dy.dV
+   pred.5 <- sqrt(pred)
+   dev <- (resp - pred) / pred.5
+   Ddev <- -0.5*(resp+pred)/(pred.5*pred)
+   attr(dev, "gradient") <- Ddev * cbind(Vm = dy.dV, K = dy.dK)
+   dev
+ }
> 
> Pur.wt.grad <- nls( ~ weighted.MM.grad(rate, conc1, conc.1, Vm, K),
+                    data = lisTreat, start = list(Vm = 200, K = 0.1))
> 
> rbind(coef(Pur.wt), coef(Pur.wt1), coef(Pur.wt.grad))
         Vm        K
[1,] 206.83 0.054611
[2,] 206.83 0.054611
[3,] 206.83 0.054611
> 
> ## In this example, there seems to be no advantage to providing the gradient.
> ## In other cases, there might be.
> 
> 
> ## The two examples below show that you can fit a model to
> ## artificial data with noise but not to artificial data
> ## without noise.
> x <- 1:10
> y <- 2*x + 3                            # perfect fit
> ## terminates in an error, because convergence cannot be confirmed:
> try(nls(y ~ a + b*x, start = list(a = 0.12345, b = 0.54321)))
Error in nls(y ~ a + b * x, start = list(a = 0.12345, b = 0.54321)) : 
  number of iterations exceeded maximum of 50
> ## adjusting the convergence test by adding 'scaleOffset' to its denominator RSS:
> nls(y ~ a + b*x, start = list(a = 0.12345, b = 0.54321),
+     control = list(scaleOffset = 1, printEval=TRUE))
  It.   1, fac=           1, eval (no.,total): ( 1,  1): new dev = 1.05935e-12
Nonlinear regression model
  model: y ~ a + b * x
   data: parent.frame()
a b 
3 2 
 residual sum-of-squares: 1.06e-12
> ## Alternatively, jittering the "too exact" values slightly:
> set.seed(27)
> yeps <- y + rnorm(length(y), sd = 0.01) # added noise
> nls(yeps ~ a + b*x, start = list(a = 0.12345, b = 0.54321))
Nonlinear regression model
  model: yeps ~ a + b * x
   data: parent.frame()
a b 
3 2 
 residual sum-of-squares: 0.00135
> 
> 
> ## the nls() internal cheap guess for starting values can be sufficient:
> x <- -(1:100)/10
> y <- 100 + 10 * exp(x / 2) + rnorm(x)/10
> nlmod <- nls(y ~  Const + A * exp(B * x))
Warning in nls(y ~ Const + A * exp(B * x)) :
  No starting values specified for some parameters.
Initializing ‘Const’, ‘A’, ‘B’ to '1.'.
Consider specifying 'start' or using a selfStart model
> 
> plot(x,y, main = "nls(*), data, true function and fit, n=100")
> curve(100 + 10 * exp(x / 2), col = 4, add = TRUE)
> lines(x, predict(nlmod), col = 2)
> 
> 
> ## Here, requiring close convergence, we must use more accurate numerical
> ## differentiation, as the default otherwise typically gives
> ## Error: "step factor .. reduced below 'minFactor' .."
> ## IGNORE_RDIFF_BEGIN
> try(nlm1 <- update(nlmod, control = list(tol = 1e-7)))
Warning in nls(formula = y ~ Const + A * exp(B * x), algorithm = "default",  :
  No starting values specified for some parameters.
Initializing ‘Const’, ‘A’, ‘B’ to '1.'.
Consider specifying 'start' or using a selfStart model
> o2 <- options(digits = 10) # more accuracy for 'trace'
> ## central differencing works here typically (PR#18165: not converging on *some*):
> ctr2 <- nls.control(nDcentral=TRUE, tol = 8e-8, # <- even smaller than above
+    warnOnly =
+         TRUE || # << work around; e.g. needed on some ATLAS-Lapack setups
+         (grepl("^aarch64.*linux", R.version$platform) && grepl("^NixOS", osVersion)
+               ))
> (nlm2 <- update(nlmod, control = ctr2, trace = TRUE)); options(o2)
Warning in nls(formula = y ~ Const + A * exp(B * x), algorithm = "default",  :
  No starting values specified for some parameters.
Initializing ‘Const’, ‘A’, ‘B’ to '1.'.
Consider specifying 'start' or using a selfStart model
1017460.306    (4.15e+02): par = (1 1 1)
758164.7503    (2.34e+02): par = (13.42031396 1.961485 0.05947543745)
269506.3538    (3.23e+02): par = (51.75719816 -13.09155956 0.8428607705)
68969.21895    (1.03e+02): par = (76.0006985 -1.935226741 1.019085799)
633.3672233    (1.29e+00): par = (100.3761515 8.624648407 5.104490263)
151.4400223    (9.39e+00): par = (100.6344391 4.913490982 0.2849209561)
53.08739887    (7.24e+00): par = (100.6830407 6.899303309 0.4637755073)
1.344478645    (5.97e-01): par = (100.0368306 9.897714142 0.516929494)
0.9908415909   (1.55e-02): par = (100.0300625 9.9144191 0.5023516843)
0.9906046057   (1.84e-05): par = (100.0288724 9.916224018 0.5025207337)
0.9906046054   (9.93e-08): par = (100.028875 9.916228366 0.50252165)
0.9906046054   (6.56e-10): par = (100.028875 9.916228388 0.5025216549)
Nonlinear regression model
  model: y ~ Const + A * exp(B * x)
   data: parent.frame()
      Const           A           B 
100.0288750   9.9162284   0.5025217 
 residual sum-of-squares: 0.9906046
> ## --> convergence tolerance  4.997e-8 (in 11 iter.)
> ## IGNORE_RDIFF_END
> 
> ## The muscle dataset in MASS is from an experiment on muscle
> ## contraction on 21 animals.  The observed variables are Strip
> ## (identifier of muscle), Conc (CaCl concentration) and Length
> ## (resulting length of muscle section).
> ## IGNORE_RDIFF_BEGIN
> if(requireNamespace("MASS", quietly = TRUE)) withAutoprint({
+ ## The nonlinear model considered is
+ ##       Length = alpha + beta*exp(-Conc/theta) + error
+ ## where theta is constant but alpha and beta may vary with Strip.
+ 
+ with(MASS::muscle, table(Strip)) # 2, 3 or 4 obs per strip
+ 
+ ## We first use the plinear algorithm to fit an overall model,
+ ## ignoring that alpha and beta might vary with Strip.
+ musc.1 <- nls(Length ~ cbind(1, exp(-Conc/th)), MASS::muscle,
+               start = list(th = 1), algorithm = "plinear")
+ summary(musc.1)
+ 
+ ## Then we use nls' indexing feature for parameters in non-linear
+ ## models to use the conventional algorithm to fit a model in which
+ ## alpha and beta vary with Strip.  The starting values are provided
+ ## by the previously fitted model.
+ ## Note that with indexed parameters, the starting values must be
+ ## given in a list (with names):
+ b <- coef(musc.1)
+ musc.2 <- nls(Length ~ a[Strip] + b[Strip]*exp(-Conc/th), MASS::muscle,
+               start = list(a = rep(b[2], 21), b = rep(b[3], 21), th = b[1]))
+ summary(musc.2)
+ })
> with(MASS::muscle, table(Strip))
Strip
S01 S02 S03 S04 S05 S06 S07 S08 S09 S10 S11 S12 S13 S14 S15 S16 S17 S18 S19 S20 
  4   4   4   3   3   3   2   2   2   2   3   2   2   2   2   4   4   3   3   3 
S21 
  3 
> musc.1 <- nls(Length ~ cbind(1, exp(-Conc/th)), MASS::muscle, start = list(th = 1), 
+     algorithm = "plinear")
> summary(musc.1)

Formula: Length ~ cbind(1, exp(-Conc/th))

Parameters:
      Estimate Std. Error t value Pr(>|t|)    
th       0.608      0.115    5.31  1.9e-06 ***
.lin1   28.963      1.230   23.55  < 2e-16 ***
.lin2  -34.227      3.793   -9.02  1.4e-12 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 4.67 on 57 degrees of freedom

> b <- coef(musc.1)
> musc.2 <- nls(Length ~ a[Strip] + b[Strip] * exp(-Conc/th), MASS::muscle, 
+     start = list(a = rep(b[2], 21), b = rep(b[3], 21), th = b[1]))
> summary(musc.2)

Formula: Length ~ a[Strip] + b[Strip] * exp(-Conc/th)

Parameters:
    Estimate Std. Error t value Pr(>|t|)    
a1    23.454      0.796   29.46  5.0e-16 ***
a2    28.302      0.793   35.70  < 2e-16 ***
a3    30.801      1.716   17.95  1.7e-12 ***
a4    25.921      3.016    8.60  1.4e-07 ***
a5    23.201      2.891    8.02  3.5e-07 ***
a6    20.120      2.435    8.26  2.3e-07 ***
a7    33.595      1.682   19.98  3.0e-13 ***
a8    39.053      3.753   10.41  8.6e-09 ***
a9    32.137      3.318    9.69  2.5e-08 ***
a10   40.005      3.336   11.99  1.0e-09 ***
a11   36.190      3.109   11.64  1.6e-09 ***
a12   36.911      1.839   20.07  2.8e-13 ***
a13   30.635      1.700   18.02  1.6e-12 ***
a14   34.312      3.495    9.82  2.0e-08 ***
a15   38.395      3.375   11.38  2.3e-09 ***
a16   31.226      0.886   35.26  < 2e-16 ***
a17   31.230      0.821   38.02  < 2e-16 ***
a18   19.998      1.011   19.78  3.6e-13 ***
a19   37.095      1.071   34.65  < 2e-16 ***
a20   32.594      1.121   29.07  6.2e-16 ***
a21   30.376      1.057   28.74  7.5e-16 ***
b1   -27.300      6.873   -3.97  0.00099 ***
b2   -26.270      6.754   -3.89  0.00118 ** 
b3   -30.901      2.270  -13.61  1.4e-10 ***
b4   -32.238      3.810   -8.46  1.7e-07 ***
b5   -29.941      3.773   -7.94  4.1e-07 ***
b6   -20.622      3.647   -5.65  2.9e-05 ***
b7   -19.625      8.085   -2.43  0.02661 *  
b8   -45.780      4.113  -11.13  3.2e-09 ***
b9   -31.345      6.352   -4.93  0.00013 ***
b10  -38.599      3.955   -9.76  2.2e-08 ***
b11  -33.921      3.839   -8.84  9.2e-08 ***
b12  -38.268      8.992   -4.26  0.00053 ***
b13  -22.568      8.194   -2.75  0.01355 *  
b14  -36.167      6.358   -5.69  2.7e-05 ***
b15  -32.952      6.354   -5.19  7.4e-05 ***
b16  -47.207      9.540   -4.95  0.00012 ***
b17  -33.875      7.688   -4.41  0.00039 ***
b18  -15.896      6.222   -2.55  0.02051 *  
b19  -28.969      7.235   -4.00  0.00092 ***
b20  -36.917      8.033   -4.60  0.00026 ***
b21  -26.508      7.012   -3.78  0.00149 ** 
th     0.797      0.127    6.30  8.0e-06 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 1.11 on 17 degrees of freedom

> ## IGNORE_RDIFF_END
> ## Don't show: 
> options(od)
> ## End(Don't show)
> 
> 
> 
> cleanEx()
> nameEx("nls.control")
> ### * nls.control
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: nls.control
> ### Title: Control the Iterations in 'nls'
> ### Aliases: nls.control
> ### Keywords: nonlinear regression models
> 
> ### ** Examples
> 
> nls.control(minFactor = 1/2048)
$maxiter
[1] 50

$tol
[1] 1e-05

$minFactor
[1] 0.0004882812

$printEval
[1] FALSE

$warnOnly
[1] FALSE

$scaleOffset
[1] 0

$nDcentral
[1] FALSE

> 
> 
> 
> cleanEx()
> nameEx("numericDeriv")
> ### * numericDeriv
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: numericDeriv
> ### Title: Evaluate Derivatives Numerically
> ### Aliases: numericDeriv
> ### Keywords: models
> 
> ### ** Examples
> 
> myenv <- new.env()
> myenv$mean <- 0.
> myenv$sd   <- 1.
> myenv$x    <- seq(-3., 3., length.out = 31)
> nD <- numericDeriv(quote(pnorm(x, mean, sd)), c("mean", "sd"), myenv)
> str(nD)
 num [1:31] 0.00135 0.00256 0.00466 0.0082 0.0139 ...
 - attr(*, "gradient")= num [1:31, 1:2] -0.00443 -0.00792 -0.01358 -0.02239 -0.03547 ...
> 
> ## Visualize :
> require(graphics)
> matplot(myenv$x, cbind(c(nD), attr(nD, "gradient")), type="l")
> abline(h=0, lty=3)
> ## "gradient" is close to the true derivatives, you don't see any diff.:
> curve( - dnorm(x), col=2, lty=3, lwd=2, add=TRUE)
> curve(-x*dnorm(x), col=3, lty=3, lwd=2, add=TRUE)
> ##
> ## IGNORE_RDIFF_BEGIN
> # shows 1.609e-8 on most platforms
> all.equal(attr(nD,"gradient"),
+           with(myenv, cbind(-dnorm(x), -x*dnorm(x))))
[1] "Mean relative difference: 1.609569e-08"
> ## IGNORE_RDIFF_END
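> ## central differencing (argument 'central') is typically more accurate,
> ## at the cost of twice as many function evaluations
> ## (a sketch, not run here):
> ## Not run: 
> ##D nD2 <- numericDeriv(quote(pnorm(x, mean, sd)), c("mean", "sd"),
> ##D                     myenv, central = TRUE)
> ##D all.equal(attr(nD2, "gradient"),
> ##D           with(myenv, cbind(-dnorm(x), -x*dnorm(x))))
> ## End(Not run)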
> 
> 
> 
> cleanEx()
> nameEx("oneway.test")
> ### * oneway.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: oneway.test
> ### Title: Test for Equal Means in a One-Way Layout
> ### Aliases: oneway.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Not assuming equal variances
> oneway.test(extra ~ group, data = sleep)

	One-way analysis of means (not assuming equal variances)

data:  extra and group
F = 3.4626, num df = 1.000, denom df = 17.776, p-value = 0.07939

> ## Assuming equal variances
> oneway.test(extra ~ group, data = sleep, var.equal = TRUE)

	One-way analysis of means

data:  extra and group
F = 3.4626, num df = 1, denom df = 18, p-value = 0.07919

> ## which gives the same result as
> anova(lm(extra ~ group, data = sleep))
Analysis of Variance Table

Response: extra
          Df Sum Sq Mean Sq F value  Pr(>F)  
group      1 12.482 12.4820  3.4626 0.07919 .
Residuals 18 64.886  3.6048                  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
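> ## with two groups these are just squared t statistics: Welch's t by
> ## default, the pooled-variance t when var.equal = TRUE
> ## (a sketch, not run here):
> ## Not run: 
> ##D t.test(extra ~ group, data = sleep)$statistic^2                    # 3.4626
> ##D t.test(extra ~ group, data = sleep, var.equal = TRUE)$statistic^2  # 3.4626
> ## End(Not run)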
> 
> 
> 
> cleanEx()
> nameEx("optim")
> ### * optim
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: optim
> ### Title: General-purpose Optimization
> ### Aliases: optim optimHess
> ### Keywords: nonlinear optimize
> 
> ### ** Examples
> 
> 
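> ## The shipped example is not run here; a minimal sketch using the
> ## classic Rosenbrock function with its analytic gradient:
> ## Not run: 
> ##D fr <- function(x) 100 * (x[2] - x[1]^2)^2 + (1 - x[1])^2
> ##D grr <- function(x) c(-400 * x[1] * (x[2] - x[1]^2) - 2 * (1 - x[1]),
> ##D                       200 * (x[2] - x[1]^2))
> ##D optim(c(-1.2, 1), fr, grr, method = "BFGS")
> ## End(Not run)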
> cleanEx()
> nameEx("optimize")
> ### * optimize
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: optimize
> ### Title: One Dimensional Optimization
> ### Aliases: optimize optimise
> ### Keywords: optimize
> 
> ### ** Examples
> 
> require(graphics)
> 
> f <- function (x, a) (x - a)^2
> xmin <- optimize(f, c(0, 1), tol = 0.0001, a = 1/3)
> xmin
$minimum
[1] 0.3333333

$objective
[1] 0

> 
> ## See where the function is evaluated:
> optimize(function(x) x^2*(print(x)-1), lower = 0, upper = 10)
[1] 3.81966
[1] 6.18034
[1] 2.36068
[1] 2.077939
[1] 1.505823
[1] 0.9306496
[1] 0.9196752
[1] 0.772905
[1] 0.4776816
[1] 0.6491436
[1] 0.656315
[1] 0.6653777
[1] 0.6667786
[1] 0.6666728
[1] 0.6666321
[1] 0.6667135
[1] 0.6666728
$minimum
[1] 0.6666728

$objective
[1] -0.1481481

> 
> ## "wrong" solution with unlucky interval and piecewise constant f():
> f  <- function(x) ifelse(x > -1, ifelse(x < 4, exp(-1/abs(x - 1)), 10), 10)
> fp <- function(x) { print(x); f(x) }
> 
> plot(f, -2,5, ylim = 0:1, col = 2)
> optimize(fp, c(-4, 20))   # doesn't see the minimum
[1] 5.167184
[1] 10.83282
[1] 14.33437
[1] 16.49845
[1] 17.83592
[1] 18.66253
[1] 19.1734
[1] 19.48913
[1] 19.68427
[1] 19.80487
[1] 19.8794
[1] 19.92547
[1] 19.95393
[1] 19.97153
[1] 19.9824
[1] 19.98913
[1] 19.99328
[1] 19.99585
[1] 19.99743
[1] 19.99841
[1] 19.99902
[1] 19.99939
[1] 19.99963
[1] 19.99977
[1] 19.99986
[1] 19.99991
[1] 19.99995
[1] 19.99995
$minimum
[1] 19.99995

$objective
[1] 10

> optimize(fp, c(-7, 20))   # ok
[1] 3.313082
[1] 9.686918
[1] -0.6261646
[1] 1.244956
[1] 1.250965
[1] 0.771827
[1] 0.2378417
[1] 1.000451
[1] 0.9906964
[1] 0.9955736
[1] 0.9980122
[1] 0.9992315
[1] 0.9998411
[1] 0.9996083
[1] 0.9994644
[1] 0.9993754
[1] 0.9993204
[1] 0.9992797
[1] 0.9992797
$minimum
[1] 0.9992797

$objective
[1] 0

> 
> 
> 
> cleanEx()
> nameEx("order.dendrogram")
> ### * order.dendrogram
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: order.dendrogram
> ### Title: Ordering or Labels of the Leaves in a Dendrogram
> ### Aliases: order.dendrogram labels.dendrogram
> ### Keywords: manip
> 
> ### ** Examples
> 
> set.seed(123)
> x <- rnorm(10)
> hc <- hclust(dist(x))
> hc$order
 [1]  3  6  7  2  4  5  8  9  1 10
> dd <- as.dendrogram(hc)
> order.dendrogram(dd) ## the same :
 [1]  3  6  7  2  4  5  8  9  1 10
> stopifnot(hc$order == order.dendrogram(dd))
> 
> d2 <- as.dendrogram(hclust(dist(USArrests)))
> labels(d2) ## in this case the same as
 [1] "Florida"        "North Carolina" "Delaware"       "Alabama"       
 [5] "Louisiana"      "Alaska"         "Mississippi"    "South Carolina"
 [9] "Maryland"       "Arizona"        "New Mexico"     "California"    
[13] "Illinois"       "New York"       "Michigan"       "Nevada"        
[17] "Missouri"       "Arkansas"       "Tennessee"      "Georgia"       
[21] "Colorado"       "Texas"          "Rhode Island"   "Wyoming"       
[25] "Oregon"         "Oklahoma"       "Virginia"       "Washington"    
[29] "Massachusetts"  "New Jersey"     "Ohio"           "Utah"          
[33] "Connecticut"    "Pennsylvania"   "Nebraska"       "Kentucky"      
[37] "Montana"        "Idaho"          "Indiana"        "Kansas"        
[41] "Hawaii"         "Minnesota"      "Wisconsin"      "Iowa"          
[45] "New Hampshire"  "West Virginia"  "Maine"          "South Dakota"  
[49] "North Dakota"   "Vermont"       
> stopifnot(identical(labels(d2),
+    rownames(USArrests)[order.dendrogram(d2)]))
> 
> 
> 
> cleanEx()
> nameEx("p.adjust")
> ### * p.adjust
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: p.adjust
> ### Title: Adjust P-values for Multiple Comparisons
> ### Aliases: p.adjust p.adjust.methods
> ### Keywords: htest
> 
> ### ** Examples
> 
> require(graphics)
> 
> set.seed(123)
> x <- rnorm(50, mean = c(rep(0, 25), rep(3, 25)))
> p <- 2*pnorm(sort(-abs(x)))
> 
> round(p, 3)
 [1] 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.001 0.002
[13] 0.003 0.004 0.005 0.007 0.007 0.009 0.009 0.011 0.021 0.049 0.061 0.063
[25] 0.074 0.083 0.086 0.119 0.189 0.206 0.221 0.286 0.305 0.466 0.483 0.492
[37] 0.532 0.575 0.578 0.619 0.636 0.645 0.656 0.689 0.719 0.818 0.827 0.897
[49] 0.912 0.944
> round(p.adjust(p), 3)
 [1] 0.000 0.001 0.001 0.005 0.005 0.006 0.006 0.007 0.009 0.016 0.024 0.063
[13] 0.125 0.131 0.189 0.239 0.240 0.291 0.301 0.350 0.635 1.000 1.000 1.000
[25] 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000
[37] 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000 1.000
[49] 1.000 1.000
> round(p.adjust(p, "BH"), 3)
 [1] 0.000 0.000 0.000 0.001 0.001 0.001 0.001 0.001 0.001 0.002 0.003 0.007
[13] 0.013 0.013 0.017 0.021 0.021 0.024 0.025 0.028 0.050 0.112 0.130 0.130
[25] 0.148 0.159 0.160 0.213 0.326 0.343 0.356 0.446 0.462 0.684 0.684 0.684
[37] 0.719 0.741 0.741 0.763 0.763 0.763 0.763 0.782 0.799 0.880 0.880 0.930
[49] 0.930 0.944
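> ## "bonferroni" is simply pmin(1, n * p) (a sketch, not run here):
> ## Not run: 
> ##D stopifnot(all.equal(p.adjust(p, "bonferroni"),
> ##D                     pmin(1, length(p) * p)))
> ## End(Not run)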
> 
> ## or all of them at once (dropping the "fdr" alias):
> p.adjust.M <- p.adjust.methods[p.adjust.methods != "fdr"]
> p.adj    <- sapply(p.adjust.M, function(meth) p.adjust(p, meth))
> p.adj.60 <- sapply(p.adjust.M, function(meth) p.adjust(p, meth, n = 60))
> stopifnot(identical(p.adj[,"none"], p), p.adj <= p.adj.60)
> round(p.adj, 3)
       holm hochberg hommel bonferroni    BH    BY  none
 [1,] 0.000    0.000  0.000      0.000 0.000 0.000 0.000
 [2,] 0.001    0.001  0.001      0.001 0.000 0.002 0.000
 [3,] 0.001    0.001  0.001      0.001 0.000 0.002 0.000
 [4,] 0.005    0.005  0.004      0.005 0.001 0.004 0.000
 [5,] 0.005    0.005  0.005      0.005 0.001 0.004 0.000
 [6,] 0.006    0.006  0.005      0.006 0.001 0.004 0.000
 [7,] 0.006    0.006  0.006      0.007 0.001 0.004 0.000
 [8,] 0.007    0.007  0.007      0.008 0.001 0.004 0.000
 [9,] 0.009    0.009  0.009      0.011 0.001 0.006 0.000
[10,] 0.016    0.016  0.015      0.019 0.002 0.009 0.000
[11,] 0.024    0.024  0.024      0.031 0.003 0.013 0.001
[12,] 0.063    0.063  0.058      0.081 0.007 0.030 0.002
[13,] 0.125    0.125  0.109      0.165 0.013 0.057 0.003
[14,] 0.131    0.131  0.117      0.177 0.013 0.057 0.004
[15,] 0.189    0.189  0.168      0.262 0.017 0.079 0.005
[16,] 0.239    0.239  0.212      0.342 0.021 0.093 0.007
[17,] 0.240    0.240  0.219      0.353 0.021 0.093 0.007
[18,] 0.291    0.291  0.273      0.440 0.024 0.110 0.009
[19,] 0.301    0.301  0.291      0.470 0.025 0.111 0.009
[20,] 0.350    0.350  0.339      0.565 0.028 0.127 0.011
[21,] 0.635    0.635  0.571      1.000 0.050 0.227 0.021
[22,] 1.000    0.944  0.944      1.000 0.112 0.503 0.049
[23,] 1.000    0.944  0.944      1.000 0.130 0.587 0.061
[24,] 1.000    0.944  0.944      1.000 0.130 0.587 0.063
[25,] 1.000    0.944  0.944      1.000 0.148 0.665 0.074
[26,] 1.000    0.944  0.944      1.000 0.159 0.717 0.083
[27,] 1.000    0.944  0.944      1.000 0.160 0.719 0.086
[28,] 1.000    0.944  0.944      1.000 0.213 0.957 0.119
[29,] 1.000    0.944  0.944      1.000 0.326 1.000 0.189
[30,] 1.000    0.944  0.944      1.000 0.343 1.000 0.206
[31,] 1.000    0.944  0.944      1.000 0.356 1.000 0.221
[32,] 1.000    0.944  0.944      1.000 0.446 1.000 0.286
[33,] 1.000    0.944  0.944      1.000 0.462 1.000 0.305
[34,] 1.000    0.944  0.944      1.000 0.684 1.000 0.466
[35,] 1.000    0.944  0.944      1.000 0.684 1.000 0.483
[36,] 1.000    0.944  0.944      1.000 0.684 1.000 0.492
[37,] 1.000    0.944  0.944      1.000 0.719 1.000 0.532
[38,] 1.000    0.944  0.944      1.000 0.741 1.000 0.575
[39,] 1.000    0.944  0.944      1.000 0.741 1.000 0.578
[40,] 1.000    0.944  0.944      1.000 0.763 1.000 0.619
[41,] 1.000    0.944  0.944      1.000 0.763 1.000 0.636
[42,] 1.000    0.944  0.944      1.000 0.763 1.000 0.645
[43,] 1.000    0.944  0.944      1.000 0.763 1.000 0.656
[44,] 1.000    0.944  0.944      1.000 0.782 1.000 0.689
[45,] 1.000    0.944  0.944      1.000 0.799 1.000 0.719
[46,] 1.000    0.944  0.944      1.000 0.880 1.000 0.818
[47,] 1.000    0.944  0.944      1.000 0.880 1.000 0.827
[48,] 1.000    0.944  0.944      1.000 0.930 1.000 0.897
[49,] 1.000    0.944  0.944      1.000 0.930 1.000 0.912
[50,] 1.000    0.944  0.944      1.000 0.944 1.000 0.944
> ## or a bit nicer:
> noquote(apply(p.adj, 2, format.pval, digits = 3))
      holm     hochberg hommel   bonferroni BH       BY      none    
 [1,] 1.18e-05 1.18e-05 1.18e-05 1.18e-05   1.18e-05 5.3e-05 2.35e-07
 [2,] 0.00103  0.00103  0.00101  0.00105    0.000429 0.00193 2.10e-05
 [3,] 0.00124  0.00124  0.00124  0.00129    0.000429 0.00193 2.58e-05
 [4,] 0.00461  0.00461  0.00422  0.00491    0.000947 0.00426 9.81e-05
 [5,] 0.00484  0.00484  0.00453  0.00526    0.000947 0.00426 0.000105
 [6,] 0.00559  0.00559  0.00521  0.00621    0.000947 0.00426 0.000124
 [7,] 0.00583  0.00583  0.00557  0.00663    0.000947 0.00426 0.000133
 [8,] 0.00674  0.00674  0.00659  0.00784    0.000980 0.00441 0.000157
 [9,] 0.00947  0.00947  0.00924  0.01127    0.001253 0.00564 0.000225
[10,] 0.01556  0.01556  0.01518  0.01898    0.001898 0.00854 0.000380
[11,] 0.02446  0.02446  0.02446  0.03057    0.002780 0.01251 0.000611
[12,] 0.06294  0.06294  0.05810  0.08070    0.006725 0.03026 0.001614
[13,] 0.12549  0.12549  0.10898  0.16512    0.012637 0.05686 0.003302
[14,] 0.13092  0.13092  0.11677  0.17692    0.012637 0.05686 0.003538
[15,] 0.18853  0.18853  0.16758  0.26185    0.017457 0.07854 0.005237
[16,] 0.23912  0.23912  0.21179  0.34160    0.020762 0.09341 0.006832
[17,] 0.24001  0.24001  0.21884  0.35296    0.020762 0.09341 0.007059
[18,] 0.29057  0.29057  0.27296  0.44026    0.024459 0.11004 0.008805
[19,] 0.30083  0.30083  0.29143  0.47005    0.024740 0.11131 0.009401
[20,] 0.35024  0.35024  0.33894  0.56490    0.028245 0.12708 0.011298
[21,] 0.63451  0.63451  0.57105  1.00000    0.050358 0.22657 0.021150
[22,] 1.00000  0.94379  0.94379  1.00000    0.111880 0.50337 0.049227
[23,] 1.00000  0.94379  0.94379  1.00000    0.130463 0.58698 0.060533
[24,] 1.00000  0.94379  0.94379  1.00000    0.130463 0.58698 0.062622
[25,] 1.00000  0.94379  0.94379  1.00000    0.147903 0.66545 0.073952
[26,] 1.00000  0.94379  0.94379  1.00000    0.159252 0.71651 0.082811
[27,] 1.00000  0.94379  0.94379  1.00000    0.159877 0.71932 0.086333
[28,] 1.00000  0.94379  0.94379  1.00000    0.212617 0.95661 0.119065
[29,] 1.00000  0.94379  0.94379  1.00000    0.325999 1.00000 0.189080
[30,] 1.00000  0.94379  0.94379  1.00000    0.343082 1.00000 0.205849
[31,] 1.00000  0.94379  0.94379  1.00000    0.356325 1.00000 0.220921
[32,] 1.00000  0.94379  0.94379  1.00000    0.446250 1.00000 0.285600
[33,] 1.00000  0.94379  0.94379  1.00000    0.461954 1.00000 0.304889
[34,] 1.00000  0.94379  0.94379  1.00000    0.683577 1.00000 0.466068
[35,] 1.00000  0.94379  0.94379  1.00000    0.683577 1.00000 0.483081
[36,] 1.00000  0.94379  0.94379  1.00000    0.683577 1.00000 0.492175
[37,] 1.00000  0.94379  0.94379  1.00000    0.718845 1.00000 0.531945
[38,] 1.00000  0.94379  0.94379  1.00000    0.741435 1.00000 0.575155
[39,] 1.00000  0.94379  0.94379  1.00000    0.741435 1.00000 0.578319
[40,] 1.00000  0.94379  0.94379  1.00000    0.762606 1.00000 0.618589
[41,] 1.00000  0.94379  0.94379  1.00000    0.762606 1.00000 0.636362
[42,] 1.00000  0.94379  0.94379  1.00000    0.762606 1.00000 0.644859
[43,] 1.00000  0.94379  0.94379  1.00000    0.762606 1.00000 0.655841
[44,] 1.00000  0.94379  0.94379  1.00000    0.782487 1.00000 0.688588
[45,] 1.00000  0.94379  0.94379  1.00000    0.798874 1.00000 0.718986
[46,] 1.00000  0.94379  0.94379  1.00000    0.880265 1.00000 0.817954
[47,] 1.00000  0.94379  0.94379  1.00000    0.880265 1.00000 0.827449
[48,] 1.00000  0.94379  0.94379  1.00000    0.930478 1.00000 0.897130
[49,] 1.00000  0.94379  0.94379  1.00000    0.930478 1.00000 0.911868
[50,] 1.00000  0.94379  0.94379  1.00000    0.943789 1.00000 0.943789
> 
> 
> ## and a graphic:
> matplot(p, p.adj, ylab="p.adjust(p, meth)", type = "l", asp = 1, lty = 1:6,
+         main = "P-value adjustments")
> legend(0.7, 0.6, p.adjust.M, col = 1:6, lty = 1:6)
> 
> ## Can work with NA's:
> pN <- p; iN <- c(46, 47); pN[iN] <- NA
> pN.a <- sapply(p.adjust.M, function(meth) p.adjust(pN, meth))
> ## The smallest 20 P-values all affected by the NA's :
> round((pN.a / p.adj)[1:20, ] , 4)
        holm hochberg hommel bonferroni   BH     BY none
 [1,] 0.9600   0.9600 0.9600       0.96 0.96 0.9514    1
 [2,] 0.9592   0.9592 0.9583       0.96 0.96 0.9514    1
 [3,] 0.9583   0.9583 0.9583       0.96 0.96 0.9514    1
 [4,] 0.9574   0.9574 0.9535       0.96 0.96 0.9514    1
 [5,] 0.9565   0.9565 0.9535       0.96 0.96 0.9514    1
 [6,] 0.9556   0.9556 0.9524       0.96 0.96 0.9514    1
 [7,] 0.9545   0.9545 0.9524       0.96 0.96 0.9514    1
 [8,] 0.9535   0.9535 0.9524       0.96 0.96 0.9514    1
 [9,] 0.9524   0.9524 0.9512       0.96 0.96 0.9514    1
[10,] 0.9512   0.9512 0.9500       0.96 0.96 0.9514    1
[11,] 0.9500   0.9500 0.9500       0.96 0.96 0.9514    1
[12,] 0.9487   0.9487 0.9444       0.96 0.96 0.9514    1
[13,] 0.9474   0.9474 0.9394       0.96 0.96 0.9514    1
[14,] 0.9459   0.9459 0.9394       0.96 0.96 0.9514    1
[15,] 0.9444   0.9444 0.9375       0.96 0.96 0.9514    1
[16,] 0.9429   0.9429 0.9355       0.96 0.96 0.9514    1
[17,] 0.9412   0.9412 0.9355       0.96 0.96 0.9514    1
[18,] 0.9394   0.9394 0.9355       0.96 0.96 0.9514    1
[19,] 0.9375   0.9375 0.9355       0.96 0.96 0.9514    1
[20,] 0.9355   0.9355 0.9333       0.96 0.96 0.9514    1
> 
> 
> 
> cleanEx()
> nameEx("pairwise.prop.test")
> ### * pairwise.prop.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: pairwise.prop.test
> ### Title: Pairwise comparisons for proportions
> ### Aliases: pairwise.prop.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> smokers  <- c( 83, 90, 129, 70 )
> patients <- c( 86, 93, 136, 82 )
> pairwise.prop.test(smokers, patients)
Warning in prop.test(x[c(i, j)], n[c(i, j)], ...) :
  Chi-squared approximation may be incorrect
Warning in prop.test(x[c(i, j)], n[c(i, j)], ...) :
  Chi-squared approximation may be incorrect
Warning in prop.test(x[c(i, j)], n[c(i, j)], ...) :
  Chi-squared approximation may be incorrect

	Pairwise comparisons using Pairwise comparison of proportions 

data:  smokers out of patients 

  1     2     3    
2 1.000 -     -    
3 1.000 1.000 -    
4 0.119 0.093 0.124

P value adjustment method: holm 
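> ## each table entry combines prop.test() on one pair of groups with a
> ## Holm adjustment over all six comparisons; a minimal consistency sketch:
> pp  <- suppressWarnings(pairwise.prop.test(smokers, patients))
> raw <- suppressWarnings(apply(combn(4L, 2L), 2L,
+        function(ij) prop.test(smokers[ij], patients[ij])$p.value))
> stopifnot(all.equal(sort(p.adjust(raw, "holm")),
+                     sort(pp$p.value[!is.na(pp$p.value)])))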
> 
> 
> 
> cleanEx()
> nameEx("pairwise.t.test")
> ### * pairwise.t.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: pairwise.t.test
> ### Title: Pairwise t tests
> ### Aliases: pairwise.t.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> attach(airquality)
> Month <- factor(Month, labels = month.abb[5:9])
> pairwise.t.test(Ozone, Month)

	Pairwise comparisons using t tests with pooled SD 

data:  Ozone and Month 

    May     Jun     Jul     Aug    
Jun 1.00000 -       -       -      
Jul 0.00026 0.05113 -       -      
Aug 0.00019 0.04987 1.00000 -      
Sep 1.00000 1.00000 0.00488 0.00388

P value adjustment method: holm 
> pairwise.t.test(Ozone, Month, p.adjust.method = "bonf")

	Pairwise comparisons using t tests with pooled SD 

data:  Ozone and Month 

    May     Jun     Jul     Aug    
Jun 1.00000 -       -       -      
Jul 0.00029 0.10225 -       -      
Aug 0.00019 0.08312 1.00000 -      
Sep 1.00000 1.00000 0.00697 0.00485

P value adjustment method: bonferroni 
> pairwise.t.test(Ozone, Month, pool.sd = FALSE)

	Pairwise comparisons using t tests with non-pooled SD 

data:  Ozone and Month 

    May     Jun     Jul     Aug    
Jun 1.00000 -       -       -      
Jul 0.00026 0.01527 -       -      
Aug 0.00195 0.02135 1.00000 -      
Sep 0.86321 1.00000 0.00589 0.01721

P value adjustment method: holm 
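> ## with pool.sd = FALSE and no adjustment, each entry is an ordinary
> ## Welch two-sample t test on that pair of months (a quick sketch):
> pt.raw <- pairwise.t.test(Ozone, Month, pool.sd = FALSE,
+                           p.adjust.method = "none")
> stopifnot(all.equal(pt.raw$p.value["Jul", "May"],
+                     t.test(Ozone[Month == "Jul"],
+                            Ozone[Month == "May"])$p.value))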
> detach()
> 
> 
> 
> cleanEx()
> nameEx("pairwise.wilcox.test")
> ### * pairwise.wilcox.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: pairwise.wilcox.test
> ### Title: Pairwise Wilcoxon Rank Sum Tests
> ### Aliases: pairwise.wilcox.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> attach(airquality)
> Month <- factor(Month, labels = month.abb[5:9])
> ## These give warnings because of ties :
> pairwise.wilcox.test(Ozone, Month)
Warning in wilcox.test.default(xi, xj, paired = paired, ...) :
  cannot compute exact p-value with ties
  [... the warning above is repeated for each of the 10 pairwise comparisons]

	Pairwise comparisons using Wilcoxon rank sum test with continuity correction 

data:  Ozone and Month 

    May    Jun    Jul    Aug   
Jun 0.5775 -      -      -     
Jul 0.0003 0.0848 -      -     
Aug 0.0011 0.1295 1.0000 -     
Sep 0.4744 1.0000 0.0060 0.0227

P value adjustment method: holm 
> pairwise.wilcox.test(Ozone, Month, p.adjust.method = "bonf")
Warning in wilcox.test.default(xi, xj, paired = paired, ...) :
  cannot compute exact p-value with ties
  [... the warning above is repeated for each of the 10 pairwise comparisons]

	Pairwise comparisons using Wilcoxon rank sum test with continuity correction 

data:  Ozone and Month 

    May    Jun    Jul    Aug   
Jun 1.0000 -      -      -     
Jul 0.0003 0.1414 -      -     
Aug 0.0012 0.2591 1.0000 -     
Sep 1.0000 1.0000 0.0074 0.0325

P value adjustment method: bonferroni 
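> ## analogously, without adjustment each entry is wilcox.test() on the
> ## pair of months involved (a sketch; tie warnings suppressed):
> pw.raw <- suppressWarnings(
+     pairwise.wilcox.test(Ozone, Month, p.adjust.method = "none"))
> w12 <- suppressWarnings(wilcox.test(Ozone[Month == "Jul"],
+                                     Ozone[Month == "May"]))
> stopifnot(all.equal(pw.raw$p.value["Jul", "May"], w12$p.value))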
> detach()
> 
> 
> 
> cleanEx()
> nameEx("plot.acf")
> ### * plot.acf
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.acf
> ### Title: Plot Autocovariance and Autocorrelation Functions
> ### Aliases: plot.acf
> ### Keywords: hplot ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> z4  <- ts(matrix(rnorm(400), 100, 4), start = c(1961, 1), frequency = 12)
> z7  <- ts(matrix(rnorm(700), 100, 7), start = c(1961, 1), frequency = 12)
> acf(z4)
> acf(z7, max.mfrow = 7)   # squeeze onto 1 page
> acf(z7) # multi-page
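> ## the dashed bounds drawn in each panel are +/- qnorm((1 + ci)/2) /
> ## sqrt(n.used) for the default ci.type = "white", ci = 0.95; with
> ## series of length 100 that is about 0.196:
> stopifnot(all.equal(qnorm((1 + 0.95)/2)/sqrt(100), 0.196,
+                     tolerance = 1e-3))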
> 
> 
> 
> cleanEx()
> nameEx("plot.isoreg")
> ### * plot.isoreg
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.isoreg
> ### Title: Plot Method for 'isoreg' Objects
> ### Aliases: plot.isoreg lines.isoreg
> ### Keywords: hplot print
> 
> ### ** Examples
> 
> require(graphics)
> 
> utils::example(isoreg) # for the examples there

isoreg> require(graphics)

isoreg> (ir <- isoreg(c(1,0,4,3,3,5,4,2,0)))
Isotonic regression from isoreg(x = c(1, 0, 4, 3, 3, 5, 4, 2, 0)),
  with 2 knots / breaks at obs.nr. 2 9 ;
  initially ordered 'x'
  and further components List of 4
 $ x : num [1:9] 1 2 3 4 5 6 7 8 9
 $ y : num [1:9] 1 0 4 3 3 5 4 2 0
 $ yf: num [1:9] 0.5 0.5 3 3 3 3 3 3 3
 $ yc: num [1:10] 0 1 1 5 8 11 16 20 22 22

isoreg> plot(ir, plot.type = "row")

isoreg> (ir3 <- isoreg(y3 <- c(1,0,4,3,3,5,4,2, 3))) # last "3", not "0"
Isotonic regression from isoreg(x = y3 <- c(1, 0, 4, 3, 3, 5, 4, 2, 3)),
  with 3 knots / breaks at obs.nr. 2 5 9 ;
  initially ordered 'x'
  and further components List of 4
 $ x : num [1:9] 1 2 3 4 5 6 7 8 9
 $ y : num [1:9] 1 0 4 3 3 5 4 2 3
 $ yf: num [1:9] 0.5 0.5 3.33 3.33 3.33 ...
 $ yc: num [1:10] 0 1 1 5 8 11 16 20 22 25

isoreg> (fi3 <- as.stepfun(ir3))
Step function
Call: isoreg(x = y3 <- c(1, 0, 4, 3, 3, 5, 4, 2, 3))
 x[1:3] =      2,      5,      9
4 plateau levels =    0.5,    0.5, 3.3333,    3.5

isoreg> (ir4 <- isoreg(1:10, y4 <- c(5, 9, 1:2, 5:8, 3, 8)))
Isotonic regression from isoreg(x = 1:10, y = y4 <- c(5, 9, 1:2, 5:8, 3, 8)),
  with 5 knots / breaks at obs.nr. 4 5 6 9 10 ;
  initially ordered 'x'
  and further components List of 4
 $ x : num [1:10] 1 2 3 4 5 6 7 8 9 10
 $ y : num [1:10] 5 9 1 2 5 6 7 8 3 8
 $ yf: num [1:10] 4.25 4.25 4.25 4.25 5 6 6 6 6 8
 $ yc: num [1:11] 0 5 14 15 17 22 28 35 43 46 ...

isoreg> cat(sprintf("R^2 = %.2f\n",
isoreg+             1 - sum(residuals(ir4)^2) / ((10-1)*var(y4))))
R^2 = 0.21

isoreg> ## If you are interested in the knots alone :
isoreg> with(ir4, cbind(iKnots, yf[iKnots]))
     iKnots     
[1,]      4 4.25
[2,]      5 5.00
[3,]      6 6.00
[4,]      9 6.00
[5,]     10 8.00

isoreg> ## Example of unordered x[] with ties:
isoreg> x <- sample((0:30)/8)

isoreg> y <- exp(x)

isoreg> x. <- round(x) # ties!

isoreg> plot(m <- isoreg(x., y))

isoreg> stopifnot(all.equal(with(m, yf[iKnots]),
isoreg+                     as.vector(tapply(y, x., mean))))
> 
> plot(y3, main = "simple plot(.)  +  lines(<isoreg>)")
> lines(ir3)
> 
> ## 'same' plot as above, "proving" that only ranks of 'x' are important
> plot(isoreg(2^(1:9), c(1,0,4,3,3,5,4,2,0)), plot.type = "row", log = "x")
Warning in xy.coords(x, y, xlabel, ylabel, log) :
  1 x value <= 0 omitted from logarithmic plot
Warning in xy.coords(x, y, xlabel, ylabel, log) :
  1 x value <= 0 omitted from logarithmic plot
> 
> plot(ir3, plot.type = "row", ylab = "y3")
> plot(isoreg(y3 - 4), plot.type = "r", ylab = "y3 - 4")
> plot(ir4, plot.type = "ro",  ylab = "y4", xlab = "x = 1:n")
> 
> ## experiment a bit with these (C-c C-j):
> plot(isoreg(sample(9),  y3), plot.type = "row")
> plot(isoreg(sample(9),  y3), plot.type = "col.wise")
> 
> plot(ir <- isoreg(sample(10), sample(10, replace = TRUE)),
+                   plot.type = "r")
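> ## within each plateau the fit is the mean of the pooled y values
> ## (pool-adjacent-violators); a quick check on 'ir3' from above:
> stopifnot(all.equal(ir3$yf[1:2], rep(mean(ir3$y[1:2]), 2)),
+           all.equal(ir3$yf[3:5], rep(mean(ir3$y[3:5]), 3)))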
> 
> 
> 
> cleanEx()
> nameEx("plot.lm")
> ### * plot.lm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.lm
> ### Title: Plot Diagnostics for an 'lm' Object
> ### Aliases: plot.lm
> ### Keywords: hplot regression
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## Analysis of the life-cycle savings data
> ## given in Belsley, Kuh and Welsch.
> lm.SR <- lm(sr ~ pop15 + pop75 + dpi + ddpi, data = LifeCycleSavings)
> plot(lm.SR)
> 
> ## 4 plots on 1 page;
> ## allow room for printing model formula in outer margin:
> par(mfrow = c(2, 2), oma = c(0, 0, 2, 0)) -> opar
> plot(lm.SR)
> plot(lm.SR, id.n = NULL)                 # no id's
> plot(lm.SR, id.n = 5, labels.id = NULL)  # 5 id numbers
> 
> ## Was default in R <= 2.1.x:
> ## Cook's distances instead of Residual-Leverage plot
> plot(lm.SR, which = 1:4)
> 
> ## All the above fit a smooth curve where applicable
> ## by default unless "add.smooth" is changed.
> ## Give a smoother curve by increasing the lowess span :
> plot(lm.SR, panel = function(x, y) panel.smooth(x, y, span = 1))
> 
> par(mfrow = c(2,1)) # same oma as above
> plot(lm.SR, which = 1:2, sub.caption = "Saving Rates, n=50, p=5")
> 
> ## Cook's distance tweaking
> par(mfrow = c(2,3)) # same oma ...
> plot(lm.SR, which = 1:6, cook.col = "royalblue")
> 
> ## A case where overplotting of the "legend" is to be avoided:
> if(dev.interactive(TRUE)) getOption("device")(height = 6, width = 4)
> par(mfrow = c(3,1), mar = c(5,5,4,2)/2 +.1, mgp = c(1.4, .5, 0))
> plot(lm.SR, which = 5, extend.ylim.f = c(0.2, 0.08))
> plot(lm.SR, which = 5, cook.lty = "dotdash",
+      cook.legendChanges = list(x = "bottomright", legend = "Cook"))
> plot(lm.SR, which = 5, cook.legendChanges = NULL)  # no "legend"
> 
> ## Don't show: 
> ## An example with *long* formula that needs abbreviation:
> par(mfrow = c(2,2))
> for(i in 1:5) assign(paste("long.var.name", i, sep = "."), runif(10))
> plot(lm(long.var.name.1 ~
+         long.var.name.2 + long.var.name.3 + long.var.name.4 + long.var.name.5))
> ## End(Don't show)
> par(opar) # reset par()s
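> ## the Cook's distance panels (which = 4, 6) display cooks.distance(lm.SR);
> ## a small sanity sketch of those values:
> cd <- cooks.distance(lm.SR)
> stopifnot(length(cd) == nrow(LifeCycleSavings), all(cd >= 0))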
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("plot.ppr")
> ### * plot.ppr
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.ppr
> ### Title: Plot Ridge Functions for Projection Pursuit Regression Fit
> ### Aliases: plot.ppr
> ### Keywords: hplot
> 
> ### ** Examples
> 
> require(graphics)
> 
> rock1 <- within(rock, { area1 <- area/10000; peri1 <- peri/10000 })
> par(mfrow = c(3,2)) # maybe: , pty = "s"
> rock.ppr <- ppr(log(perm) ~ area1 + peri1 + shape,
+                 data = rock1, nterms = 2, max.terms = 5)
> plot(rock.ppr, main = "ppr(log(perm)~ ., nterms=2, max.terms=5)")
> plot(update(rock.ppr, bass = 5), main = "update(..., bass = 5)")
> plot(update(rock.ppr, sm.method = "gcv", gcvpen = 2),
+      main = "update(..., sm.method=\"gcv\", gcvpen=2)")
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("plot.profile")
> ### * plot.profile
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.profile
> ### Title: Plotting Functions for 'profile' Objects
> ### Aliases: plot.profile pairs.profile
> ### Keywords: models hplot
> 
> ### ** Examples
> 
> ## see ?profile.glm for another example using glm fits.
> 
> ## a version of example(profile.nls) from R >= 2.8.0
> fm1 <- nls(demand ~ SSasympOrig(Time, A, lrc), data = BOD)
> pr1 <- profile(fm1, alphamax = 0.1)
> stats:::plot.profile(pr1) ## override dispatch to plot.profile.nls
> pairs(pr1) # a little odd since the parameters are highly correlated
> 
> ## an example from ?nls
> x <- -(1:100)/10
> y <- 100 + 10 * exp(x / 2) + rnorm(x)/10
> nlmod <- nls(y ~  Const + A * exp(B * x), start=list(Const=100, A=10, B=1))
> pairs(profile(nlmod))
> 
> ## example from Dobson (1990) (see ?glm)
> counts <- c(18,17,15,20,10,20,25,13,12)
> outcome <- gl(3,1,9)
> treatment <- gl(3,3)
> ## this example is only formally a Poisson model. It is really a 
> ## comparison of 3 multinomials. Only the interaction parameters are of 
> ## interest.
> glm.D93i <- glm(counts ~ outcome * treatment, family = poisson())
> pr1 <- profile(glm.D93i)
> pr2 <- profile(glm.D93i, which=6:9)
> plot(pr1)
> plot(pr2)
> pairs(pr1)
> pairs(pr2)
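> ## each component of a glm profile stores 'tau', the signed square root
> ## of the change in deviance; straight tau traces indicate a nearly
> ## quadratic log-likelihood (a structural sketch):
> stopifnot(inherits(pr1, "profile"), "tau" %in% names(pr1[[1]]))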
> 
> 
> 
> cleanEx()
> nameEx("plot.profile.nls")
> ### * plot.profile.nls
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.profile.nls
> ### Title: Plot a 'profile.nls' Object
> ### Aliases: plot.profile.nls
> ### Keywords: nonlinear regression models
> 
> ### ** Examples
> 
> require(graphics)
> 
> # obtain the fitted object
> fm1 <- nls(demand ~ SSasympOrig(Time, A, lrc), data = BOD)
> # get the profile for the fitted model
> pr1 <- profile(fm1, alphamax = 0.05)
> opar <- par(mfrow = c(2,2), oma = c(1.1, 0, 1.1, 0), las = 1)
> plot(pr1, conf = c(95, 90, 80, 50)/100)
> plot(pr1, conf = c(95, 90, 80, 50)/100, absVal = FALSE)
> mtext("Confidence intervals based on the profile sum of squares",
+       side = 3, outer = TRUE)
> mtext("BOD data - confidence levels of 50%, 80%, 90% and 95%",
+       side = 1, outer = TRUE)
> par(opar)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("plot.stepfun")
> ### * plot.stepfun
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.stepfun
> ### Title: Plot Step Functions
> ### Aliases: plot.stepfun lines.stepfun
> ### Keywords: hplot
> 
> ### ** Examples
> 
> require(graphics)
> 
> y0 <- c(1,2,4,3)
> sfun0  <- stepfun(1:3, y0, f = 0)
> sfun.2 <- stepfun(1:3, y0, f = .2)
> sfun1  <- stepfun(1:3, y0, right = TRUE)
> 
> tt <- seq(0, 3, by = 0.1)
> op <- par(mfrow = c(2,2))
> plot(sfun0); plot(sfun0, xval = tt, add = TRUE, col.hor = "bisque")
> plot(sfun.2);plot(sfun.2, xval = tt, add = TRUE, col = "orange") # all colors
> plot(sfun1);lines(sfun1, xval = tt, col.hor = "coral")
> ##-- This is  revealing :
> plot(sfun0, verticals = FALSE,
+      main = "stepfun(x, y0, f=f)  for f = 0, .2, 1")
> for(i in 1:3)
+   lines(list(sfun0, sfun.2, stepfun(1:3, y0, f = 1))[[i]], col = i)
> legend(2.5, 1.9, paste("f =", c(0, 0.2, 1)), col = 1:3, lty = 1, y.intersp = 1)
> par(op)
> 
> # Extend and/or restrict 'viewport':
> plot(sfun0, xlim = c(0,5), ylim = c(0, 3.5),
+      main = "plot(stepfun(*), xlim= . , ylim = .)")
> 
> ##-- this works too (automatic call to  ecdf(.)):
> plot.stepfun(rt(50, df = 3), col.vert = "gray20")
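> ## knots() recovers the jump locations, and with f = 0 the function is
> ## right-continuous, jumping to the new level at each knot (a sketch):
> stopifnot(all.equal(knots(sfun0), as.numeric(1:3)),
+           all.equal(sfun0(0:3), c(1, 2, 4, 3)))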
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("plot.ts")
> ### * plot.ts
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: plot.ts
> ### Title: Plotting Time-Series Objects
> ### Aliases: plot.ts lines.ts
> ### Keywords: hplot ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## Multivariate
> z <- ts(matrix(rt(200 * 8, df = 3), 200, 8),
+         start = c(1961, 1), frequency = 12)
> plot(z, yax.flip = TRUE)
> plot(z, axes = FALSE, ann = FALSE, frame.plot = TRUE,
+      mar.multi = c(0,0,0,0), oma.multi = c(1,1,5,1))
> title("plot(ts(..), axes=FALSE, ann=FALSE, frame.plot=TRUE, mar..., oma...)")
> 
> z <- window(z[,1:3], end = c(1969,12))
> plot(z, type = "b")    # multiple
> plot(z, plot.type = "single", lty = 1:3, col = 4:2)
> 
> ## A phase plot:
> plot(nhtemp, lag(nhtemp, 1), cex = .8, col = "blue",
+      main = "Lag plot of New Haven temperatures")
> 
> ## xy.lines and xy.labels are FALSE for large series:
> plot(lag(sunspots, 1), sunspots, pch = ".")
> 
> SMI <- EuStockMarkets[, "SMI"]
> plot(lag(SMI,  1), SMI, pch = ".")
> plot(lag(SMI, 20), SMI, pch = ".", log = "xy",
+      main = "4 weeks lagged SMI stocks -- log scale", xy.lines =  TRUE)
> 
> 
> 
> cleanEx()
> nameEx("poisson.test")
> ### * poisson.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: poisson.test
> ### Title: Exact Poisson tests
> ### Aliases: poisson.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> ### These are paraphrased from data sets in the ISwR package
> 
> ## SMR, Welsh Nickel workers
> poisson.test(137, 24.19893)

	Exact Poisson test

data:  137 time base: 24.19893
number of events = 137, time base = 24.199, p-value < 2.2e-16
alternative hypothesis: true event rate is not equal to 1
95 percent confidence interval:
 4.753125 6.692709
sample estimates:
event rate 
  5.661407 

> 
> ## eba1977, compare Fredericia to other three cities for ages 55-59
> poisson.test(c(11, 6+8+7), c(800, 1083+1050+878))

	Comparison of Poisson rates

data:  c(11, 6 + 8 + 7) time base: c(800, 1083 + 1050 + 878)
count1 = 11, expected count1 = 6.7174, p-value = 0.07967
alternative hypothesis: true rate ratio is not equal to 1
95 percent confidence interval:
 0.8584264 4.2772659
sample estimates:
rate ratio 
  1.971488 

> 
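> ## the two-sample comparison reduces to a binomial test of x1 out of
> ## x1 + x2 with success probability T1/(T1 + T2) under rate ratio 1
> ## (a minimal sketch of the equivalence):
> pt2 <- poisson.test(c(11, 21), c(800, 3011))
> bt2 <- binom.test(11, 11 + 21, p = 800/(800 + 3011))
> stopifnot(all.equal(pt2$p.value, bt2$p.value))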
> 
> 
> cleanEx()
> nameEx("poly")
> ### * poly
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: poly
> ### Title: Compute Orthogonal Polynomials
> ### Aliases: poly polym predict.poly makepredictcall.poly
> ### Keywords: math
> 
> ### ** Examples
> 
> od <- options(digits = 3) # avoid too much visual clutter
> (z <- poly(1:10, 3))
           1      2      3
 [1,] -0.495  0.522 -0.453
 [2,] -0.385  0.174  0.151
 [3,] -0.275 -0.087  0.378
 [4,] -0.165 -0.261  0.335
 [5,] -0.055 -0.348  0.130
 [6,]  0.055 -0.348 -0.130
 [7,]  0.165 -0.261 -0.335
 [8,]  0.275 -0.087 -0.378
 [9,]  0.385  0.174 -0.151
[10,]  0.495  0.522  0.453
attr(,"coefs")
attr(,"coefs")$alpha
[1] 5.5 5.5 5.5

attr(,"coefs")$norm2
[1]    1.0   10.0   82.5  528.0 3088.8

attr(,"degree")
[1] 1 2 3
attr(,"class")
[1] "poly"   "matrix"
> predict(z, seq(2, 4, 0.5))
          1       2     3
[1,] -0.385  0.1741 0.151
[2,] -0.330  0.0326 0.305
[3,] -0.275 -0.0870 0.378
[4,] -0.220 -0.1850 0.383
[5,] -0.165 -0.2611 0.335
> zapsmall(poly(seq(4, 6, 0.5), 3, coefs = attr(z, "coefs")))
          1      2      3
[1,] -0.165 -0.261  0.335
[2,] -0.110 -0.316  0.246
[3,] -0.055 -0.348  0.130
[4,]  0.000 -0.359  0.000
[5,]  0.055 -0.348 -0.130
attr(,"coefs")
attr(,"coefs")$alpha
[1] 5.5 5.5 5.5

attr(,"coefs")$norm2
[1]    1.0   10.0   82.5  528.0 3088.8

attr(,"degree")
[1] 1 2 3
attr(,"class")
[1] "poly"   "matrix"
> 
>  zm <- zapsmall(polym (    1:4, c(1, 4:6),  degree = 3)) # or just poly():
> (z1 <- zapsmall(poly(cbind(1:4, c(1, 4:6)), degree = 3)))
        1.0  2.0    3.0    0.1   1.1    2.1    0.2    1.2    0.3
[1,] -0.671  0.5 -0.224 -0.802 0.538 -0.401  0.323 -0.217 -0.053
[2,] -0.224 -0.5  0.671  0.000 0.000  0.000 -0.688  0.154  0.526
[3,]  0.224 -0.5 -0.671  0.267 0.060 -0.134 -0.239 -0.053 -0.788
[4,]  0.671  0.5  0.224  0.535 0.359  0.267  0.604  0.405  0.315
attr(,"degree")
[1] 1 2 3 1 2 3 2 3 3
attr(,"coefs")
attr(,"coefs")[[1]]
attr(,"coefs")[[1]]$alpha
[1] 2.5 2.5 2.5

attr(,"coefs")[[1]]$norm2
[1] 1.0 4.0 5.0 4.0 1.8


attr(,"coefs")[[2]]
attr(,"coefs")[[2]]$alpha
[1] 4.00 2.71 4.47

attr(,"coefs")[[2]]$norm2
[1]  1.00  4.00 14.00 25.86  9.94


attr(,"class")
[1] "poly"   "matrix"
> ## they are the same :
> stopifnot(all.equal(zm, z1, tolerance = 1e-15))
> 
> ## poly(<matrix>, df) --- used to fail till July 14 (vive la France!), 2017:
> m2 <- cbind(1:4, c(1, 4:6))
> pm2 <- zapsmall(poly(m2, 3)) # "unnamed degree = 3"
> stopifnot(all.equal(pm2, zm, tolerance = 1e-15))
> 
> options(od)
> 
> 
> 
> cleanEx()
> nameEx("power")
> ### * power
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: power
> ### Title: Create a Power Link Object
> ### Aliases: power
> ### Keywords: models
> 
> ### ** Examples
> 
> power()
$linkfun
function (mu) 
mu
<environment: namespace:stats>

$linkinv
function (eta) 
eta
<environment: namespace:stats>

$mu.eta
function (eta) 
rep.int(1, length(eta))
<environment: namespace:stats>

$valideta
function (eta) 
TRUE
<environment: namespace:stats>

$name
[1] "identity"

attr(,"class")
[1] "link-glm"
> quasi(link = power(1/3))[c("linkfun", "linkinv")]
$linkfun
function (mu) 
mu^lambda
<bytecode: 0x12056a680>
<environment: 0x120575bb0>

$linkinv
function (eta) 
pmax(eta^(1/lambda), .Machine$double.eps)
<bytecode: 0x12056a530>
<environment: 0x120575bb0>

> 
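> ## linkfun and linkinv are mutual inverses; a quick check for the
> ## cube-root link:
> lk <- power(1/3)
> mu <- c(0.5, 1, 2, 8)
> stopifnot(all.equal(lk$linkinv(lk$linkfun(mu)), mu))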
> 
> 
> cleanEx()
> nameEx("power.anova.test")
> ### * power.anova.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: power.anova.test
> ### Title: Power Calculations for Balanced One-Way Analysis of Variance
> ###   Tests
> ### Aliases: power.anova.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> power.anova.test(groups = 4, n = 5, between.var = 1, within.var = 3)

     Balanced one-way analysis of variance power calculation 

         groups = 4
              n = 5
    between.var = 1
     within.var = 3
      sig.level = 0.05
          power = 0.3535594

NOTE: n is number in each group

> # Power = 0.3535594
> 
> power.anova.test(groups = 4, between.var = 1, within.var = 3,
+                  power = .80)

     Balanced one-way analysis of variance power calculation 

         groups = 4
              n = 11.92613
    between.var = 1
     within.var = 3
      sig.level = 0.05
          power = 0.8

NOTE: n is number in each group

> # n = 11.92613
> 
> ## Assume we have prior knowledge of the group means:
> groupmeans <- c(120, 130, 140, 150)
> power.anova.test(groups = length(groupmeans),
+                  between.var = var(groupmeans),
+                  within.var = 500, power = .90) # n = 15.18834

     Balanced one-way analysis of variance power calculation 

         groups = 4
              n = 15.18834
    between.var = 166.6667
     within.var = 500
      sig.level = 0.05
          power = 0.9

NOTE: n is number in each group

> 
> 
> 
> cleanEx()
> nameEx("power.prop.test")
> ### * power.prop.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: power.prop.test
> ### Title: Power Calculations for Two-Sample Test for Proportions
> ### Aliases: power.prop.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> power.prop.test(n = 50, p1 = .50, p2 = .75)      ## => power = 0.740

     Two-sample comparison of proportions power calculation 

              n = 50
             p1 = 0.5
             p2 = 0.75
      sig.level = 0.05
          power = 0.7401659
    alternative = two.sided

NOTE: n is number in *each* group

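> ## a sketch of the classical normal-approximation formula behind the
> ## two-sided result above:
> qn <- qnorm(0.05/2, lower.tail = FALSE); pbar <- (.50 + .75)/2
> pow <- pnorm((sqrt(50) * abs(.50 - .75) - qn * sqrt(2 * pbar * (1 - pbar))) /
+              sqrt(.50 * (1 - .50) + .75 * (1 - .75)))
> stopifnot(all.equal(pow, 0.7401659, tolerance = 1e-6))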
> power.prop.test(p1 = .50, p2 = .75, power = .90) ## =>     n = 76.7

     Two-sample comparison of proportions power calculation 

              n = 76.70693
             p1 = 0.5
             p2 = 0.75
      sig.level = 0.05
          power = 0.9
    alternative = two.sided

NOTE: n is number in *each* group

> power.prop.test(n = 50, p1 = .5, power = .90)    ## =>    p2 = 0.8026

     Two-sample comparison of proportions power calculation 

              n = 50
             p1 = 0.5
             p2 = 0.8026141
      sig.level = 0.05
          power = 0.9
    alternative = two.sided

NOTE: n is number in *each* group

> power.prop.test(n = 50, p1 = .5, p2 = 0.9, power = .90, sig.level=NULL)

     Two-sample comparison of proportions power calculation 

              n = 50
             p1 = 0.5
             p2 = 0.9
      sig.level = 0.001318068
          power = 0.9
    alternative = two.sided

NOTE: n is number in *each* group

>                                                  ## => sig.l = 0.00131
> power.prop.test(p1 = .5, p2 = 0.501, sig.level=.001, power=0.90)

     Two-sample comparison of proportions power calculation 

              n = 10451937
             p1 = 0.5
             p2 = 0.501
      sig.level = 0.001
          power = 0.9
    alternative = two.sided

NOTE: n is number in *each* group

>                                                  ## => n = 10451937
> try(
+  power.prop.test(n=30, p1=0.90, p2=NULL, power=0.8)
+ ) # a warning  (which may become an error)
Warning in power.prop.test(n = 30, p1 = 0.9, p2 = NULL, power = 0.8) :
  No p2 in [p1, 1] can be found to achieve the desired power

     Two-sample comparison of proportions power calculation 

              n = 30
             p1 = 0.9
             p2 = 1.030182
      sig.level = 0.05
          power = 0.8
    alternative = two.sided

NOTE: n is number in *each* group

> ## Reason:
> power.prop.test(      p1=0.90, p2= 1.0, power=0.8) ##-> n = 73.37

     Two-sample comparison of proportions power calculation 

              n = 73.37427
             p1 = 0.9
             p2 = 1
      sig.level = 0.05
          power = 0.8
    alternative = two.sided

NOTE: n is number in *each* group

> 
> 
> 
> cleanEx()
> nameEx("power.t.test")
> ### * power.t.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: power.t.test
> ### Title: Power calculations for one and two sample t tests
> ### Aliases: power.t.test
> ### Keywords: htest
> 
> ### ** Examples
> 
>  power.t.test(n = 20, delta = 1)

     Two-sample t test power calculation 

              n = 20
          delta = 1
             sd = 1
      sig.level = 0.05
          power = 0.8689528
    alternative = two.sided

NOTE: n is number in *each* group

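> ## a sketch of the underlying noncentral-t tail probability for the
> ## two-sample, two-sided case above (delta = 1, sd = 1):
> n <- 20; nu <- 2 * (n - 1)
> pow <- pt(qt(0.05/2, nu, lower.tail = FALSE), nu,
+           ncp = sqrt(n/2), lower.tail = FALSE)
> stopifnot(all.equal(pow, 0.8689528, tolerance = 1e-6))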
>  power.t.test(power = .90, delta = 1)

     Two-sample t test power calculation 

              n = 22.0211
          delta = 1
             sd = 1
      sig.level = 0.05
          power = 0.9
    alternative = two.sided

NOTE: n is number in *each* group

>  power.t.test(power = .90, delta = 1, alternative = "one.sided")

     Two-sample t test power calculation 

              n = 17.84713
          delta = 1
             sd = 1
      sig.level = 0.05
          power = 0.9
    alternative = one.sided

NOTE: n is number in *each* group

> 
> 
> 
> cleanEx()
> nameEx("pp.test")
> ### * pp.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: PP.test
> ### Title: Phillips-Perron Test for Unit Roots
> ### Aliases: PP.test
> ### Keywords: ts
> 
> ### ** Examples
> 
> x <- rnorm(1000)
> PP.test(x)

	Phillips-Perron Unit Root Test

data:  x
Dickey-Fuller = -33.057, Truncation lag parameter = 7, p-value = 0.01

> y <- cumsum(x) # has unit root
> PP.test(y)

	Phillips-Perron Unit Root Test

data:  y
Dickey-Fuller = -2.6899, Truncation lag parameter = 7, p-value = 0.2863

> 
> 
> 
> cleanEx()
> nameEx("ppoints")
> ### * ppoints
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ppoints
> ### Title: Ordinates for Probability Plotting
> ### Aliases: ppoints
> ### Keywords: dplot arith distribution
> 
> ### ** Examples
> 
> ppoints(4) # the same as  ppoints(1:4)
[1] 0.1470588 0.3823529 0.6176471 0.8529412
> ppoints(10)
 [1] 0.06097561 0.15853659 0.25609756 0.35365854 0.45121951 0.54878049
 [7] 0.64634146 0.74390244 0.84146341 0.93902439
> ppoints(10, a = 1/2)
 [1] 0.05 0.15 0.25 0.35 0.45 0.55 0.65 0.75 0.85 0.95
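> ## the underlying formula is (1:n - a)/(n + 1 - 2a), with the default
> ## a = 3/8 for n <= 10 (and a = 1/2 for larger n):
> stopifnot(all.equal(ppoints(4), ((1:4) - 3/8)/(4 + 1 - 2 * (3/8))))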
> 
> ## Visualize including the fractions :
> require(graphics)## Don't show: 
> lNs <- loadedNamespaces()
> ## End(Don't show)
> p.ppoints <- function(n, ..., add = FALSE, col = par("col")) {
+   pn <- ppoints(n, ...)
+   if(add)
+       points(pn, pn, col = col)
+   else {
+       tit <- match.call(); tit[[1]] <- quote(ppoints)
+       plot(pn,pn, main = deparse(tit), col=col,
+            xlim = 0:1, ylim = 0:1, xaxs = "i", yaxs = "i")
+       abline(0, 1, col = adjustcolor(1, 1/4), lty = 3)
+   }
+   if(!add && requireNamespace("MASS", quietly = TRUE))
+     text(pn, pn, as.character(MASS::fractions(pn)),
+          adj = c(0,0)-1/4, cex = 3/4, xpd = NA, col=col)
+   abline(h = pn, v = pn, col = adjustcolor(col, 1/2), lty = 2, lwd = 1/2)
+ }
> 
> p.ppoints(4)
> p.ppoints(10)
> p.ppoints(10, a = 1/2)
> p.ppoints(21)
> p.ppoints(8) ; p.ppoints(8, a = 1/2, add=TRUE, col="tomato")
> ## Don't show: 
> if(!any("MASS" == lNs)) unloadNamespace("MASS")
> ## End(Don't show)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("ppr")
> ### * ppr
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ppr
> ### Title: Projection Pursuit Regression
> ### Aliases: ppr ppr.default ppr.formula
> ### Keywords: regression
> 
> ### ** Examples
> 
> require(graphics)
> 
> # Note: your numerical values may differ
> attach(rock)
> area1 <- area/10000; peri1 <- peri/10000
> rock.ppr <- ppr(log(perm) ~ area1 + peri1 + shape,
+                 data = rock, nterms = 2, max.terms = 5)
> rock.ppr
Call:
ppr(formula = log(perm) ~ area1 + peri1 + shape, data = rock, 
    nterms = 2, max.terms = 5)

Goodness of fit:
 2 terms  3 terms  4 terms  5 terms 
8.737806 5.289517 4.745799 4.490378 
> # Call:
> # ppr.formula(formula = log(perm) ~ area1 + peri1 + shape, data = rock,
> #     nterms = 2, max.terms = 5)
> #
> # Goodness of fit:
> #  2 terms  3 terms  4 terms  5 terms
> # 8.737806 5.289517 4.745799 4.490378
> 
> summary(rock.ppr)
Call:
ppr(formula = log(perm) ~ area1 + peri1 + shape, data = rock, 
    nterms = 2, max.terms = 5)

Goodness of fit:
 2 terms  3 terms  4 terms  5 terms 
8.737806 5.289517 4.745799 4.490378 

Projection direction vectors ('alpha'):
      term 1      term 2     
area1  0.34357179  0.37071027
peri1 -0.93781471 -0.61923542
shape  0.04961846  0.69218595

Coefficients of ridge terms ('beta'):
   term 1    term 2 
1.6079271 0.5460971 
> # .....  (same as above)
> # .....
> #
> # Projection direction vectors ('alpha'):
> #       term 1      term 2
> # area1  0.34357179  0.37071027
> # peri1 -0.93781471 -0.61923542
> # shape  0.04961846  0.69218595
> #
> # Coefficients of ridge terms:
> #    term 1    term 2
> # 1.6079271 0.5460971
> 
> par(mfrow = c(3,2))   # maybe: , pty = "s")
> plot(rock.ppr, main = "ppr(log(perm)~ ., nterms=2, max.terms=5)")
> plot(update(rock.ppr, bass = 5), main = "update(..., bass = 5)")
> plot(update(rock.ppr, sm.method = "gcv", gcvpen = 2),
+      main = "update(..., sm.method=\"gcv\", gcvpen=2)")
> cbind(perm = rock$perm, prediction = round(exp(predict(rock.ppr)), 1))
     perm prediction
1     6.3        5.9
2     6.3        6.5
3     6.3       12.1
4     6.3        7.4
5    17.1       16.3
6    17.1       13.2
7    17.1       41.2
8    17.1       10.6
9   119.0      101.9
10  119.0       73.0
11  119.0       46.8
12  119.0      121.7
13   82.4      107.8
14   82.4       79.7
15   82.4       98.3
16   82.4      128.8
17   58.6       66.8
18   58.6       33.2
19   58.6       58.7
20   58.6      111.6
21  142.0      118.9
22  142.0      128.4
23  142.0       91.9
24  142.0      188.5
25  740.0      341.7
26  740.0      577.9
27  740.0      895.7
28  740.0     1041.0
29  890.0      560.8
30  890.0      721.8
31  890.0      937.9
32  890.0      848.5
33  950.0      806.4
34  950.0     1085.4
35  950.0      945.6
36  950.0      848.5
37  100.0      154.1
38  100.0      178.0
39  100.0      321.5
40  100.0      232.7
41 1300.0     1067.0
42 1300.0      697.8
43 1300.0     1236.6
44 1300.0     1301.8
45  580.0      485.1
46  580.0      285.1
47  580.0      644.3
48  580.0      571.5
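> ## the projection directions 'alpha' are unit vectors (a quick check):
> stopifnot(all.equal(unname(colSums(rock.ppr$alpha^2)), c(1, 1)))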
> detach()
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("prcomp")
> ### * prcomp
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: prcomp
> ### Title: Principal Components Analysis
> ### Aliases: prcomp prcomp.formula prcomp.default plot.prcomp
> ###   predict.prcomp print.prcomp summary.prcomp print.summary.prcomp
> ### Keywords: multivariate
> 
> ### ** Examples
> 
> C <- chol(S <- toeplitz(.9 ^ (0:31))) # Cov.matrix and its root
> all.equal(S, crossprod(C))
[1] TRUE
> set.seed(17)
> X <- matrix(rnorm(32000), 1000, 32)
> Z <- X %*% C  ## ==>  cov(Z) ~=  C'C = S
> all.equal(cov(Z), S, tolerance = 0.08)
[1] TRUE
> pZ <- prcomp(Z, tol = 0.1)
> summary(pZ) # only ~14 PCs (out of 32)
Importance of first k=14 (out of 32) components:
                          PC1    PC2    PC3     PC4     PC5     PC6     PC7
Standard deviation     3.6415 2.7178 1.8447 1.39430 1.10207 0.90922 0.76951
Proportion of Variance 0.4173 0.2324 0.1071 0.06118 0.03822 0.02602 0.01864
Cumulative Proportion  0.4173 0.6498 0.7569 0.81806 0.85628 0.88230 0.90094
                           PC8     PC9    PC10    PC11    PC12    PC13   PC14
Standard deviation     0.67490 0.60833 0.51638 0.49048 0.44452 0.40326 0.3904
Proportion of Variance 0.01433 0.01165 0.00839 0.00757 0.00622 0.00512 0.0048
Cumulative Proportion  0.91527 0.92692 0.93531 0.94288 0.94910 0.95422 0.9590
> ## or choose only 3 PCs more directly:
> pz3 <- prcomp(Z, rank. = 3)
> summary(pz3) # same numbers as the first 3 above
Importance of first k=3 (out of 32) components:
                          PC1    PC2    PC3
Standard deviation     3.6415 2.7178 1.8447
Proportion of Variance 0.4173 0.2324 0.1071
Cumulative Proportion  0.4173 0.6498 0.7569
> stopifnot(ncol(pZ$rotation) == 14, ncol(pz3$rotation) == 3,
+           all.equal(pz3$sdev, pZ$sdev, tolerance = 1e-15)) # exactly equal typically
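> ## prcomp() rests on the SVD of the centered data matrix: the singular
> ## values divided by sqrt(n - 1) give the sdev's (a minimal sketch):
> sv <- svd(scale(Z, center = TRUE, scale = FALSE))
> stopifnot(all.equal(sv$d/sqrt(nrow(Z) - 1), prcomp(Z)$sdev))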
> 
> 
> 
> 
> cleanEx()
> nameEx("predict.HoltWinters")
> ### * predict.HoltWinters
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.HoltWinters
> ### Title: Prediction Function for Fitted Holt-Winters Models
> ### Aliases: predict.HoltWinters
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> m <- HoltWinters(co2)
> p <- predict(m, 50, prediction.interval = TRUE)
> plot(m, p)
> 
> 
> 
> cleanEx()
> nameEx("predict")
> ### * predict
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict
> ### Title: Model Predictions
> ### Aliases: predict
> ### Keywords: methods
> 
> ### ** Examples
> 
> ## Don't show: 
> old <- Sys.getlocale("LC_COLLATE")
>        invisible(Sys.setlocale("LC_COLLATE", "C"))
> ## End(Don't show)
> require(utils)
> 
> ## All the "predict" methods found
> ## NB most of the methods in the standard packages are hidden.
> ## Output will depend on what namespaces are (or have been) loaded.
> ## IGNORE_RDIFF_BEGIN
> for(fn in methods("predict"))
+    try({
+        f <- eval(substitute(getAnywhere(fn)$objs[[1]], list(fn = fn)))
+        cat(fn, ":\n\t", deparse(args(f)), "\n")
+        }, silent = TRUE)
predict.Arima :
	 function (object, n.ahead = 1L, newxreg = NULL, se.fit = TRUE,      ...)  NULL 
predict.HoltWinters :
	 function (object, n.ahead = 1L, prediction.interval = FALSE,      level = 0.95, ...)  NULL 
predict.StructTS :
	 function (object, n.ahead = 1L, se.fit = TRUE, ...)  NULL 
predict.ar :
	 function (object, newdata, n.ahead = 1L, se.fit = TRUE, ...)  NULL 
predict.arima0 :
	 function (object, n.ahead = 1L, newxreg = NULL, se.fit = TRUE,      ...)  NULL 
predict.bSpline :
	 function (object, x, nseg = 50, deriv = 0, ...)  NULL 
predict.bs :
	 function (object, newx, ...)  NULL 
predict.glm :
	 function (object, newdata = NULL, type = c("link", "response",      "terms"), se.fit = FALSE, dispersion = NULL, terms = NULL,      na.action = na.pass, ...)  NULL 
predict.glmmPQL :
	 function (object, newdata = NULL, type = c("link", "response"),      level = Q, na.action = na.pass, ...)  NULL 
predict.lda :
	 function (object, newdata, prior = object$prior, dimen, method = c("plug-in",      "predictive", "debiased"), ...)  NULL 
predict.lm :
	 function (object, newdata, se.fit = FALSE, scale = NULL, df = Inf,      interval = c("none", "confidence", "prediction"), level = 0.95,      type = c("response", "terms"), terms = NULL, na.action = na.pass,      pred.var = res.var/weights, weights = 1, rankdeficient = c("warnif",          "simple", "non-estim", "NA", "NAwarn"), tol = 1e-06,      verbose = FALSE, ...)  NULL 
predict.loess :
	 function (object, newdata = NULL, se = FALSE, na.action = na.pass,      ...)  NULL 
predict.lqs :
	 function (object, newdata, na.action = na.pass, ...)  NULL 
predict.mca :
	 function (object, newdata, type = c("row", "factor"), ...)  NULL 
predict.mlm :
	 function (object, newdata, se.fit = FALSE, na.action = na.pass,      ...)  NULL 
predict.nbSpline :
	 function (object, x, nseg = 50, deriv = 0, ...)  NULL 
predict.nls :
	 function (object, newdata, se.fit = FALSE, scale = NULL, df = Inf,      interval = c("none", "confidence", "prediction"), level = 0.95,      ...)  NULL 
predict.npolySpline :
	 function (object, x, nseg = 50, deriv = 0, ...)  NULL 
predict.ns :
	 function (object, newx, ...)  NULL 
predict.pbSpline :
	 function (object, x, nseg = 50, deriv = 0, ...)  NULL 
predict.polr :
	 function (object, newdata, type = c("class", "probs"), ...)  NULL 
predict.poly :
	 function (object, newdata, ...)  NULL 
predict.polySpline :
	 function (object, x, nseg = 50, deriv = 0, ...)  NULL 
predict.ppolySpline :
	 function (object, x, nseg = 50, deriv = 0, ...)  NULL 
predict.ppr :
	 function (object, newdata, ...)  NULL 
predict.prcomp :
	 function (object, newdata, ...)  NULL 
predict.princomp :
	 function (object, newdata, ...)  NULL 
predict.qda :
	 function (object, newdata, prior = object$prior, method = c("plug-in",      "predictive", "debiased", "looCV"), ...)  NULL 
predict.rlm :
	 function (object, newdata = NULL, scale = NULL, ...)  NULL 
predict.smooth.spline :
	 function (object, x, deriv = 0, ...)  NULL 
predict.smooth.spline.fit :
	 function (object, x, deriv = 0, ...)  NULL 
> ## IGNORE_RDIFF_END
> ## Don't show: 
> invisible(Sys.setlocale("LC_COLLATE", old))
> ## End(Don't show)
> 
> 
> 
> cleanEx()
> nameEx("predict.arima")
> ### * predict.arima
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.Arima
> ### Title: Forecast from ARIMA fits
> ### Aliases: predict.Arima
> ### Keywords: ts
> 
> ### ** Examples
> 
> od <- options(digits = 5) # avoid too much spurious accuracy
> predict(arima(lh, order = c(3,0,0)), n.ahead = 12)
$pred
Time Series:
Start = 49 
End = 60 
Frequency = 1 
 [1] 2.4602 2.2708 2.1986 2.2607 2.3470 2.4145 2.4389 2.4315 2.4102 2.3917
[11] 2.3827 2.3827

$se
Time Series:
Start = 49 
End = 60 
Frequency = 1 
 [1] 0.42268 0.50293 0.52452 0.52472 0.53055 0.53692 0.53880 0.53884 0.53910
[10] 0.53952 0.53970 0.53971

> 
> (fit <- arima(USAccDeaths, order = c(0,1,1),
+               seasonal = list(order = c(0,1,1))))

Call:
arima(x = USAccDeaths, order = c(0, 1, 1), seasonal = list(order = c(0, 1, 1)))

Coefficients:
         ma1    sma1
      -0.430  -0.553
s.e.   0.123   0.178

sigma^2 estimated as 99347:  log likelihood = -425.44,  aic = 856.88
> predict(fit, n.ahead = 6)
$pred
        Jan    Feb    Mar    Apr    May    Jun
1979 8336.1 7531.8 8314.6 8616.9 9488.9 9859.8

$se
        Jan    Feb    Mar    Apr    May    Jun
1979 315.45 363.01 405.02 443.06 478.09 510.72

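> ## forecast standard errors are nondecreasing in the horizon here:
> p6 <- predict(fit, n.ahead = 6)
> stopifnot(!is.unsorted(p6$se))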
> options(od)
> 
> 
> 
> cleanEx()
> nameEx("predict.glm")
> ### * predict.glm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.glm
> ### Title: Predict Method for GLM Fits
> ### Aliases: predict.glm
> ### Keywords: models regression
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## example from Venables and Ripley (2002, pp. 190-2.)
> ldose <- rep(0:5, 2)
> numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16)
> sex <- factor(rep(c("M", "F"), c(6, 6)))
> SF <- cbind(numdead, numalive = 20-numdead)
> budworm.lg <- glm(SF ~ sex*ldose, family = binomial)
> summary(budworm.lg)

Call:
glm(formula = SF ~ sex * ldose, family = binomial)

Coefficients:
            Estimate Std. Error z value Pr(>|z|)    
(Intercept)  -2.9935     0.5527  -5.416 6.09e-08 ***
sexM          0.1750     0.7783   0.225    0.822    
ldose         0.9060     0.1671   5.422 5.89e-08 ***
sexM:ldose    0.3529     0.2700   1.307    0.191    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for binomial family taken to be 1)

    Null deviance: 124.8756  on 11  degrees of freedom
Residual deviance:   4.9937  on  8  degrees of freedom
AIC: 43.104

Number of Fisher Scoring iterations: 4

> 
> plot(c(1,32), c(0,1), type = "n", xlab = "dose",
+      ylab = "prob", log = "x")
> text(2^ldose, numdead/20, as.character(sex))
> ld <- seq(0, 5, 0.1)
> lines(2^ld, predict(budworm.lg, data.frame(ldose = ld,
+    sex = factor(rep("M", length(ld)), levels = levels(sex))),
+    type = "response"))
> lines(2^ld, predict(budworm.lg, data.frame(ldose = ld,
+    sex = factor(rep("F", length(ld)), levels = levels(sex))),
+    type = "response"))
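> ## type = "response" is just the inverse link (plogis for the logit)
> ## applied to the "link"-scale predictions (a quick check):
> eta <- predict(budworm.lg, type = "link")
> stopifnot(all.equal(plogis(eta), predict(budworm.lg, type = "response")))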
> 
> 
> 
> cleanEx()
> nameEx("predict.lm")
> ### * predict.lm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.lm
> ### Title: Predict method for Linear Model Fits
> ### Aliases: predict.lm
> ### Keywords: regression
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## Predictions
> x <- rnorm(15)
> y <- x + rnorm(15)
> predict(lm(y ~ x))
         1          2          3          4          5          6          7 
-0.6270917  0.2550840 -0.8548779  1.7923222  0.4139268 -0.8383687  0.5858991 
         8          9         10         11         12         13         14 
 0.8591183  0.6821126 -0.2774594  1.7013932  0.4796306 -0.6214146 -2.3566523 
        15 
 1.2801229 
> new <- data.frame(x = seq(-3, 3, 0.5))
> predict(lm(y ~ x), new, se.fit = TRUE)
$fit
          1           2           3           4           5           6 
-3.21182462 -2.66733702 -2.12284942 -1.57836182 -1.03387422 -0.48938662 
          7           8           9          10          11          12 
 0.05510098  0.59958858  1.14407618  1.68856379  2.23305139  2.77753899 
         13 
 3.32202659 

$se.fit
        1         2         3         4         5         6         7         8 
0.7545246 0.6449274 0.5380108 0.4357521 0.3423511 0.2672616 0.2292452 0.2461261 
        9        10        11        12        13 
0.3090353 0.3966456 0.4960370 0.6013965 0.7100725 

$df
[1] 13

$residual.scale
[1] 0.8832292

> pred.w.plim <- predict(lm(y ~ x), new, interval = "prediction")
> pred.w.clim <- predict(lm(y ~ x), new, interval = "confidence")
> matplot(new$x, cbind(pred.w.clim, pred.w.plim[,-1]),
+         lty = c(1,2,2,3,3), type = "l", ylab = "predicted y")
> 
> ## Prediction intervals, special cases
> ##  The first three of these throw warnings
> w <- 1 + x^2
> fit <- lm(y ~ x)
> wfit <- lm(y ~ x, weights = w)
> predict(fit, interval = "prediction")
Warning in predict.lm(fit, interval = "prediction") :
  predictions on current data refer to _future_ responses

          fit        lwr         upr
1  -0.6270917 -2.6311852  1.37700188
2   0.2550840 -1.7160306  2.22619860
3  -0.8548779 -2.8806560  1.17090027
4   1.7923222 -0.3158417  3.90048609
5   0.4139268 -1.5600794  2.38793297
6  -0.8383687 -2.8624006  1.18566313
7   0.5858991 -1.3942769  2.56607519
8   0.8591183 -1.1372816  2.85551813
9   0.6821126 -1.3028836  2.66710884
10 -0.2774594 -2.2586227  1.70370390
11  1.7013932 -0.3922734  3.79505973
12  0.4796306 -1.4963611  2.45562227
13 -0.6214146 -2.6250348  1.38220556
14 -2.3566523 -4.6435457 -0.06975883
15  1.2801229 -0.7562734  3.31651909
> predict(wfit, interval = "prediction")
Warning in predict.lm(wfit, interval = "prediction") :
  predictions on current data refer to _future_ responses

Warning in predict.lm(wfit, interval = "prediction") :
  assuming prediction variance inversely proportional to weights used for fitting

          fit        lwr        upr
1  -0.6795401 -2.7152943  1.3562142
2   0.2726114 -2.0676598  2.6128825
3  -0.9253947 -2.7873163  0.9365269
4   1.9317861  0.5179227  3.3456495
5   0.4440538 -1.8206181  2.7087257
6  -0.9075760 -2.7817747  0.9666227
7   0.6296673 -1.5219043  2.7812390
8   0.9245587 -1.0206112  2.8697287
9   0.7335126 -1.3468740  2.8138992
10 -0.3021743 -2.5805044  1.9761557
11  1.8336444  0.3867018  3.2805870
12  0.5149694 -1.7094216  2.7393604
13 -0.6734127 -2.7135353  1.3667099
14 -2.5462925 -3.8073357 -1.2852494
15  1.3789581 -0.2749888  3.0329051
> predict(wfit, new, interval = "prediction")
Warning in predict.lm(wfit, new, interval = "prediction") :
  Assuming constant prediction variance even though model fit is weighted

           fit         lwr        upr
1  -3.46929871 -6.02669183 -0.9119056
2  -2.88162137 -5.38456191 -0.3786808
3  -2.29394403 -4.75159401  0.1637059
4  -1.70626669 -4.12830217  0.7157688
5  -1.11858935 -3.51511782  1.2779391
6  -0.53091201 -2.91236575  1.8505417
7   0.05676532 -2.32024444  2.4337751
8   0.64444266 -1.73881338  3.0276987
9   1.23212000 -1.16798909  3.6322291
10  1.81979734 -0.60755066  4.2471453
11  2.40747468 -0.05715376  4.8721031
12  2.99515202  0.48364874  5.5066553
13  3.58282936  1.01538228  6.1502764
> predict(wfit, new, interval = "prediction", weights = (new$x)^2)
           fit        lwr         upr
1  -3.46929871 -4.7661745 -2.17242292
2  -2.88162137 -4.1752786 -1.58796414
3  -2.29394403 -3.6870669 -0.90082115
4  -1.70626669 -3.3884596 -0.02407382
5  -1.11858935 -3.5151178  1.27793911
6  -0.53091201 -5.2286061  4.16678211
7   0.05676532       -Inf         Inf
8   0.64444266 -4.0541654  5.34305071
9   1.23212000 -1.1679891  3.63222909
10  1.81979734  0.1299644  3.50963027
11  2.40747468  1.0020775  3.81287188
12  2.99515202  1.6850045  4.30529950
13  3.58282936  2.2662384  4.89942029
> predict(wfit, new, interval = "prediction", weights = ~x^2)
           fit        lwr         upr
1  -3.46929871 -4.7661745 -2.17242292
2  -2.88162137 -4.1752786 -1.58796414
3  -2.29394403 -3.6870669 -0.90082115
4  -1.70626669 -3.3884596 -0.02407382
5  -1.11858935 -3.5151178  1.27793911
6  -0.53091201 -5.2286061  4.16678211
7   0.05676532       -Inf         Inf
8   0.64444266 -4.0541654  5.34305071
9   1.23212000 -1.1679891  3.63222909
10  1.81979734  0.1299644  3.50963027
11  2.40747468  1.0020775  3.81287188
12  2.99515202  1.6850045  4.30529950
13  3.58282936  2.2662384  4.89942029
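> ## for interval = "confidence" the half-width is qt(0.975, df) * se.fit
> ## at the default level = 0.95 (a quick sketch):
> cif <- predict(fit, new, se.fit = TRUE)
> ci  <- predict(fit, new, interval = "confidence")
> stopifnot(all.equal(ci[, "upr"] - ci[, "fit"],
+                     qt(0.975, cif$df) * cif$se.fit,
+                     check.attributes = FALSE))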
> 
> ##-- From  aov(.) example ---- predict(.. terms)
> npk.aov <- aov(yield ~ block + N*P*K, npk)
> (termL <- attr(terms(npk.aov), "term.labels"))
[1] "block" "N"     "P"     "K"     "N:P"   "N:K"   "P:K"   "N:P:K"
> (pt <- predict(npk.aov, type = "terms"))
    block      N          P          K        N:P    N:K        P:K N:P:K
1  -0.850 -4.925  0.2083333 -0.9583333  0.9416667  1.175  0.4250000     0
2  -0.850  4.925  0.2083333  0.9583333 -2.8250000  1.175 -0.1416667     0
3  -0.850 -4.925 -0.2083333  0.9583333  0.9416667  1.175 -0.1416667     0
4  -0.850  4.925 -0.2083333 -0.9583333  0.9416667 -3.525 -0.1416667     0
5   2.575  4.925 -0.2083333  0.9583333  0.9416667  1.175 -0.1416667     0
6   2.575  4.925  0.2083333 -0.9583333 -2.8250000 -3.525  0.4250000     0
7   2.575 -4.925 -0.2083333 -0.9583333  0.9416667  1.175 -0.1416667     0
8   2.575 -4.925  0.2083333  0.9583333  0.9416667  1.175 -0.1416667     0
9   5.900 -4.925  0.2083333  0.9583333  0.9416667  1.175 -0.1416667     0
10  5.900  4.925  0.2083333 -0.9583333 -2.8250000 -3.525  0.4250000     0
11  5.900  4.925 -0.2083333  0.9583333  0.9416667  1.175 -0.1416667     0
12  5.900 -4.925 -0.2083333 -0.9583333  0.9416667  1.175 -0.1416667     0
13 -4.750  4.925 -0.2083333  0.9583333  0.9416667  1.175 -0.1416667     0
14 -4.750  4.925  0.2083333 -0.9583333 -2.8250000 -3.525  0.4250000     0
15 -4.750 -4.925 -0.2083333 -0.9583333  0.9416667  1.175 -0.1416667     0
16 -4.750 -4.925  0.2083333  0.9583333  0.9416667  1.175 -0.1416667     0
17 -4.350  4.925  0.2083333  0.9583333 -2.8250000  1.175 -0.1416667     0
18 -4.350 -4.925 -0.2083333  0.9583333  0.9416667  1.175 -0.1416667     0
19 -4.350  4.925 -0.2083333 -0.9583333  0.9416667 -3.525 -0.1416667     0
20 -4.350 -4.925  0.2083333 -0.9583333  0.9416667  1.175  0.4250000     0
21  1.475  4.925 -0.2083333 -0.9583333  0.9416667 -3.525 -0.1416667     0
22  1.475  4.925  0.2083333  0.9583333 -2.8250000  1.175 -0.1416667     0
23  1.475 -4.925  0.2083333 -0.9583333  0.9416667  1.175  0.4250000     0
24  1.475 -4.925 -0.2083333  0.9583333  0.9416667  1.175 -0.1416667     0
attr(,"constant")
[1] 54.875
> pt. <- predict(npk.aov, type = "terms", terms = termL[1:4])
> stopifnot(all.equal(pt[,1:4], pt.,
+                     tolerance = 1e-12, check.attributes = FALSE))
> 
> 
> 
> cleanEx()
> nameEx("predict.loess")
> ### * predict.loess
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.loess
> ### Title: Predict LOESS Curve or Surface
> ### Aliases: predict.loess
> ### Keywords: smooth
> 
> ### ** Examples
> 
> cars.lo <- loess(dist ~ speed, cars)
> predict(cars.lo, data.frame(speed = seq(5, 30, 1)), se = TRUE)
$fit
        1         2         3         4         5         6         7         8 
 7.797353 10.002308 12.499786 15.281082 18.446568 21.865315 25.517015 29.350386 
        9        10        11        12        13        14        15        16 
33.230660 37.167935 41.205226 45.055736 48.355889 49.824812 51.986702 56.461318 
       17        18        19        20        21        22        23        24 
61.959729 68.569313 76.316068 85.212121 95.324047        NA        NA        NA 
       25        26 
       NA        NA 

$se.fit
       1        2        3        4        5        6        7        8 
7.568120 5.945831 4.990827 4.545284 4.308639 4.115049 3.789542 3.716231 
       9       10       11       12       13       14       15       16 
3.776947 4.091747 4.709568 4.245427 4.035929 3.753410 4.004705 4.043190 
      17       18       19       20       21       22       23       24 
4.026105 4.074664 4.570818 5.954217 8.302014       NA       NA       NA 
      25       26 
      NA       NA 

$residual.scale
[1] 15.29496

$df
[1] 44.6179

> # To allow extrapolation, use loess.control(surface = "direct"):
> cars.lo2 <- loess(dist ~ speed, cars,
+   control = loess.control(surface = "direct"))
> predict(cars.lo2, data.frame(speed = seq(5, 30, 1)), se = TRUE)
$fit
         1          2          3          4          5          6          7 
  7.741006   9.926596  12.442424  15.281082  18.425712  21.865315  25.713413 
         8          9         10         11         12         13         14 
 29.350386  33.230660  37.167935  41.205226  45.781544  48.355889  50.067148 
        15         16         17         18         19         20         21 
 51.986702  56.445263  62.025404  68.569313  76.193111  85.053364  95.300523 
        22         23         24         25         26 
106.974661 120.092581 134.665851 150.698545 168.190283 

$se.fit
        1         2         3         4         5         6         7         8 
 7.565991  5.959097  5.012013  4.550013  4.321596  4.119331  3.939804  3.720098 
        9        10        11        12        13        14        15        16 
 3.780877  4.096004  4.714469  4.398936  4.040129  4.184257  4.008873  4.061865 
       17        18        19        20        21        22        23        24 
 4.033998  4.078904  4.584606  5.952480  8.306901 11.601911 15.792480 20.864660 
       25        26 
26.823827 33.683999 

$residual.scale
[1] 15.31087

$df
[1] 44.55085

> 
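> ## A hedged side note (not part of the original example): with the
> ## default surface = "interpolate", predictions outside the observed
> ## speed range (4 to 25 here) are NA, whereas the "direct" fit above
> ## extrapolates.  A quick way to list the extrapolated speeds:
> ## Not run: 
> ##D new.speed <- seq(5, 30, 1)
> ##D fit1 <- predict(cars.lo,  data.frame(speed = new.speed))  # has NAs
> ##D fit2 <- predict(cars.lo2, data.frame(speed = new.speed))  # none
> ##D new.speed[is.na(fit1) & !is.na(fit2)]   # 26 27 28 29 30
> ## End(Not run)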
> 
> 
> cleanEx()
> nameEx("predict.nls")
> ### * predict.nls
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.nls
> ### Title: Predicting from Nonlinear Least Squares Fits
> ### Aliases: predict.nls
> ### Keywords: nonlinear regression models
> 
> ### ** Examples
> 
> ## Don't show: 
> od <- options(digits = 5)
> ## End(Don't show)
> require(graphics)
> 
> fm <- nls(demand ~ SSasympOrig(Time, A, lrc), data = BOD)
> predict(fm)              # fitted values at observed times
[1]  7.8874 12.5250 15.2517 16.8549 17.7975 18.6776
> ## Plot the data and add a smooth line through the predictions
> opar <- par(las = 1)
> plot(demand ~ Time, data = BOD, col = 4,
+      main = "BOD data and fitted first-order curve",
+      xlim = c(0,7), ylim = c(0, 20) )
> tt <- seq(0, 8, length.out = 101)
> lines(tt, predict(fm, list(Time = tt)))
> par(opar)
> ## Don't show: 
> options(od)
> ## End(Don't show)
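> ## Hedged caveat (from the predict.nls documentation, not this
> ## example): 'se.fit' and 'interval' are currently ignored by
> ## predict.nls, so only point predictions are returned:
> ## Not run: 
> ##D predict(fm, list(Time = tt), se.fit = TRUE)  # same as se.fit = FALSE
> ## End(Not run)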
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("predict.smooth.spline")
> ### * predict.smooth.spline
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: predict.smooth.spline
> ### Title: Predict from Smoothing Spline Fit
> ### Aliases: predict.smooth.spline
> ### Keywords: smooth
> 
> ### ** Examples
> 
> require(graphics)
> 
> attach(cars)
> cars.spl <- smooth.spline(speed, dist, df = 6.4)
> ## Don't show: 
> print.default(cars.spl)
$x
 [1]  4  7  8  9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25

$y
 [1]  5.833836 12.575603 15.037250 17.795007 20.935898 24.228479 28.488199
 [8] 33.969567 38.578804 40.986090 43.494687 47.156344 50.862366 52.692043
[15] 54.754531 66.036824 75.010419 85.290273 95.014670

$w
 [1] 2 2 1 1 3 2 4 4 4 3 2 3 4 3 5 1 1 4 1

$yin
 [1]  6.00000 13.00000 16.00000 10.00000 26.00000 22.50000 21.50000 35.00000
 [9] 50.50000 33.33333 36.00000 40.66667 64.50000 50.00000 50.40000 66.00000
[17] 54.00000 93.75000 85.00000

$tol
[1] 7e-06

$data
$data$x
 [1]  4  4  7  7  8  9 10 10 10 11 11 12 12 12 12 13 13 13 13 14 14 14 14 15 15
[26] 15 16 16 17 17 17 18 18 18 18 19 19 19 20 20 20 20 20 22 23 24 24 24 24 25

$data$y
 [1]   2  10   4  22  16  10  18  26  34  17  28  14  20  24  28  26  34  34  46
[20]  26  36  60  80  20  26  54  32  40  32  40  50  42  56  76  84  36  46  68
[39]  32  48  52  56  64  66  54  70  92  93 120  85

$data$w
[1] 1


$no.weights
[1] TRUE

$n
[1] 50

$lev
 [1] 0.8934338 0.4379689 0.1842314 0.1529048 0.3641292 0.2022506 0.3399561
 [8] 0.3123173 0.3248036 0.2745481 0.1965797 0.2777060 0.3368286 0.2546068
[15] 0.5210306 0.1743541 0.1531692 0.6249904 0.3750748

$cv
[1] FALSE

$cv.crit
[1] 257.2678

$pen.crit
[1] 3015.936

$crit
[1] 3

$df
[1] 6.400884

$spar
[1] 0.4873957

$ratio
[1] 6.575971e-05

$lambda
[1] 0.0008526606

$iparms
 icrit  ispar   iter        errorI 
     3      0     13      0     NA 

$auxM
NULL

$fit
$knot
 [1] 0.0000000 0.0000000 0.0000000 0.0000000 0.1428571 0.1904762 0.2380952
 [8] 0.2857143 0.3333333 0.3809524 0.4285714 0.4761905 0.5238095 0.5714286
[15] 0.6190476 0.6666667 0.7142857 0.7619048 0.8571429 0.9047619 0.9523810
[22] 1.0000000 1.0000000 1.0000000 1.0000000

$nk
[1] 21

$min
[1] 4

$range
[1] 21

$coef
 [1]  5.833836  8.018090 10.929996 14.991332 17.703639 20.964154 24.055137
 [8] 28.186176 34.129352 39.113818 40.888197 43.249931 47.080201 51.367328
[15] 52.624683 54.840036 62.675393 74.718525 85.502238 91.843521 95.014670

attr(,"class")
[1] "smooth.spline.fit"

$call
smooth.spline(x = speed, y = dist, df = 6.4)

attr(,"class")
[1] "smooth.spline"
> ## End(Don't show)
> 
> ## "Proof" that the derivatives are okay, by comparing with approximation
> diff.quot <- function(x, y) {
+   ## Difference quotient (central differences where available)
+   n <- length(x); i1 <- 1:2; i2 <- (n-1):n
+   c(diff(y[i1]) / diff(x[i1]), (y[-i1] - y[-i2]) / (x[-i1] - x[-i2]),
+     diff(y[i2]) / diff(x[i2]))
+ }
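> ## Quick hedged sanity check of diff.quot() (not in the original
> ## example): for y = x^2 the interior central differences reproduce
> ## dy/dx = 2x exactly:
> ## Not run: 
> ##D diff.quot(1:4, (1:4)^2)  # 3 4 6 7 ; interior entries equal 2*x
> ## End(Not run)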
> 
> xx <- unique(sort(c(seq(0, 30, by = .2), kn <- unique(speed))))
> i.kn <- match(kn, xx)   # indices of knots within xx
> op <- par(mfrow = c(2,2))
> plot(speed, dist, xlim = range(xx), main = "Smooth.spline & derivatives")
> lines(pp <- predict(cars.spl, xx), col = "red")
> points(kn, pp$y[i.kn], pch = 3, col = "dark red")
> mtext("s(x)", col = "red")
> for(d in 1:3){
+   n <- length(pp$x)
+   plot(pp$x, diff.quot(pp$x,pp$y), type = "l", xlab = "x", ylab = "",
+        col = "blue", col.main = "red",
+        main = paste0("s" ,paste(rep("'", d), collapse = ""), "(x)"))
+   mtext("Difference quotient approx.(last)", col = "blue")
+   lines(pp <- predict(cars.spl, xx, deriv = d), col = "red")
+ ## Don't show: 
+   print(pp)
+ ## End(Don't show)
+   points(kn, pp$y[i.kn], pch = 3, col = "dark red")
+   abline(h = 0, lty = 3, col = "gray")
+ }
$x
  [1]  0.0  0.2  0.4  0.6  0.8  1.0  1.2  1.4  1.6  1.8  2.0  2.2  2.4  2.6  2.8
 [16]  3.0  3.2  3.4  3.6  3.8  4.0  4.2  4.4  4.6  4.8  5.0  5.2  5.4  5.6  5.8
 [31]  6.0  6.2  6.4  6.6  6.8  7.0  7.2  7.4  7.6  7.8  8.0  8.2  8.4  8.6  8.8
 [46]  9.0  9.2  9.4  9.6  9.8 10.0 10.2 10.4 10.6 10.8 11.0 11.2 11.4 11.6 11.8
 [61] 12.0 12.2 12.4 12.6 12.8 13.0 13.2 13.4 13.6 13.8 14.0 14.2 14.4 14.6 14.8
 [76] 15.0 15.2 15.4 15.6 15.8 16.0 16.2 16.4 16.6 16.8 17.0 17.2 17.4 17.6 17.8
 [91] 18.0 18.2 18.4 18.6 18.8 19.0 19.2 19.4 19.6 19.8 20.0 20.2 20.4 20.6 20.8
[106] 21.0 21.2 21.4 21.6 21.8 22.0 22.2 22.4 22.6 22.8 23.0 23.2 23.4 23.6 23.8
[121] 24.0 24.2 24.4 24.6 24.8 25.0 25.2 25.4 25.6 25.8 26.0 26.2 26.4 26.6 26.8
[136] 27.0 27.2 27.4 27.6 27.8 28.0 28.2 28.4 28.6 28.8 29.0 29.2 29.4 29.6 29.8
[151] 30.0

$y
  [1]  2.184254  2.184254  2.184254  2.184254  2.184254  2.184254  2.184254
  [8]  2.184254  2.184254  2.184254  2.184254  2.184254  2.184254  2.184254
 [15]  2.184254  2.184254  2.184254  2.184254  2.184254  2.184254  2.184254
 [22]  2.185055  2.187545  2.191723  2.197591  2.205146  2.214391  2.225324
 [29]  2.237946  2.252257  2.268256  2.285944  2.305321  2.326387  2.349141
 [36]  2.373584  2.401852  2.436084  2.476278  2.522435  2.574554  2.635109
 [43]  2.706573  2.788944  2.882223  2.986411  3.081697  3.148275  3.186142
 [50]  3.195300  3.175749  3.166034  3.204703  3.291756  3.427192  3.611011
 [57]  3.834464  4.088800  4.374019  4.690122  5.037108  5.344118  5.540295
 [64]  5.625638  5.600147  5.463821  5.227052  4.900227  4.483347  3.976412
 [71]  3.379422  2.813354  2.399183  2.136910  2.026534  2.068056  2.203151
 [78]  2.373493  2.579082  2.819918  3.096002  3.369475  3.602482  3.795021
 [85]  3.947094  4.058699  4.080338  3.962512  3.705220  3.308463  2.772241
 [92]  2.234965  1.835047  1.572485  1.447281  1.459435  1.588383  1.813566
 [99]  2.134982  2.552632  3.066516  3.621455  4.162272  4.688967  5.201539
[106]  5.699989  6.184317  6.654523  7.110606  7.552567  7.980405  8.394132
[113]  8.793758  9.179284  9.550708  9.908031 10.197841 10.366724 10.414682
[120] 10.341713 10.147818  9.918957  9.741090  9.614215  9.538335  9.513448
[127]  9.513448  9.513448  9.513448  9.513448  9.513448  9.513448  9.513448
[134]  9.513448  9.513448  9.513448  9.513448  9.513448  9.513448  9.513448
[141]  9.513448  9.513448  9.513448  9.513448  9.513448  9.513448  9.513448
[148]  9.513448  9.513448  9.513448  9.513448

$x
  [1]  0.0  0.2  0.4  0.6  0.8  1.0  1.2  1.4  1.6  1.8  2.0  2.2  2.4  2.6  2.8
 [16]  3.0  3.2  3.4  3.6  3.8  4.0  4.2  4.4  4.6  4.8  5.0  5.2  5.4  5.6  5.8
 [31]  6.0  6.2  6.4  6.6  6.8  7.0  7.2  7.4  7.6  7.8  8.0  8.2  8.4  8.6  8.8
 [46]  9.0  9.2  9.4  9.6  9.8 10.0 10.2 10.4 10.6 10.8 11.0 11.2 11.4 11.6 11.8
 [61] 12.0 12.2 12.4 12.6 12.8 13.0 13.2 13.4 13.6 13.8 14.0 14.2 14.4 14.6 14.8
 [76] 15.0 15.2 15.4 15.6 15.8 16.0 16.2 16.4 16.6 16.8 17.0 17.2 17.4 17.6 17.8
 [91] 18.0 18.2 18.4 18.6 18.8 19.0 19.2 19.4 19.6 19.8 20.0 20.2 20.4 20.6 20.8
[106] 21.0 21.2 21.4 21.6 21.8 22.0 22.2 22.4 22.6 22.8 23.0 23.2 23.4 23.6 23.8
[121] 24.0 24.2 24.4 24.6 24.8 25.0 25.2 25.4 25.6 25.8 26.0 26.2 26.4 26.6 26.8
[136] 27.0 27.2 27.4 27.6 27.8 28.0 28.2 28.4 28.6 28.8 29.0 29.2 29.4 29.6 29.8
[151] 30.0

$y
  [1]  0.0000000000  0.0000000000  0.0000000000  0.0000000000  0.0000000000
  [6]  0.0000000000  0.0000000000  0.0000000000  0.0000000000  0.0000000000
 [11]  0.0000000000  0.0000000000  0.0000000000  0.0000000000  0.0000000000
 [16]  0.0000000000  0.0000000000  0.0000000000  0.0000000000  0.0000000000
 [21] -0.0002165147  0.0082269950  0.0166705047  0.0251140144  0.0335575241
 [26]  0.0420010338  0.0504445435  0.0588880532  0.0673315629  0.0757750726
 [31]  0.0842185822  0.0926620919  0.1011056016  0.1095491113  0.1179926210
 [36]  0.1264361307  0.1562499762  0.1860638217  0.2158776671  0.2456915126
 [41]  0.2755053581  0.3300457726  0.3845861870  0.4391266015  0.4936670160
 [46]  0.5482074305  0.4046596591  0.2611118877  0.1175641163 -0.0259836551
 [51] -0.1695314264  0.0723860374  0.3143035013  0.5562209652  0.7981384291
 [56]  1.0400558930  1.1944722898  1.3488886866  1.5033050835  1.6577214803
 [61]  1.8121378771  1.2579681380  0.7037983989  0.1496286598 -0.4045410793
 [66] -0.9587108185 -1.4089861032 -1.8592613879 -2.3095366727 -2.7598119574
 [71] -3.2100872422 -2.4505987906 -1.6911103389 -0.9316218873 -0.1721334357
 [76]  0.5873550159  0.7635912416  0.9398274674  1.1160636931  1.2922999189
 [81]  1.4685361447  1.2662005031  1.0638648615  0.8615292198  0.6591935782
 [86]  0.4568579366 -0.2404682740 -0.9377944846 -1.6351206952 -2.3324469058
 [91] -3.0297731165 -2.3429865119 -1.6561999074 -0.9694133029 -0.2826266984
 [96]  0.4041599061  0.8853283585  1.3664968109  1.8476652633  2.3288337157
[101]  2.8100021681  2.7393907024  2.6687792367  2.5981677710  2.5275563053
[106]  2.4569448396  2.3863333739  2.3157219082  2.2451104425  2.1744989768
[111]  2.1038875112  2.0333827893  1.9628780674  1.8923733455  1.8218686237
[116]  1.7513639018  1.1467332516  0.5421026014 -0.0625280487 -0.6671586989
[121] -1.2717893491 -1.0168219086 -0.7618544680 -0.5068870274 -0.2519195869
[126]  0.0030478537  0.0000000000  0.0000000000  0.0000000000  0.0000000000
[131]  0.0000000000  0.0000000000  0.0000000000  0.0000000000  0.0000000000
[136]  0.0000000000  0.0000000000  0.0000000000  0.0000000000  0.0000000000
[141]  0.0000000000  0.0000000000  0.0000000000  0.0000000000  0.0000000000
[146]  0.0000000000  0.0000000000  0.0000000000  0.0000000000  0.0000000000
[151]  0.0000000000

$x
  [1]  0.0  0.2  0.4  0.6  0.8  1.0  1.2  1.4  1.6  1.8  2.0  2.2  2.4  2.6  2.8
 [16]  3.0  3.2  3.4  3.6  3.8  4.0  4.2  4.4  4.6  4.8  5.0  5.2  5.4  5.6  5.8
 [31]  6.0  6.2  6.4  6.6  6.8  7.0  7.2  7.4  7.6  7.8  8.0  8.2  8.4  8.6  8.8
 [46]  9.0  9.2  9.4  9.6  9.8 10.0 10.2 10.4 10.6 10.8 11.0 11.2 11.4 11.6 11.8
 [61] 12.0 12.2 12.4 12.6 12.8 13.0 13.2 13.4 13.6 13.8 14.0 14.2 14.4 14.6 14.8
 [76] 15.0 15.2 15.4 15.6 15.8 16.0 16.2 16.4 16.6 16.8 17.0 17.2 17.4 17.6 17.8
 [91] 18.0 18.2 18.4 18.6 18.8 19.0 19.2 19.4 19.6 19.8 20.0 20.2 20.4 20.6 20.8
[106] 21.0 21.2 21.4 21.6 21.8 22.0 22.2 22.4 22.6 22.8 23.0 23.2 23.4 23.6 23.8
[121] 24.0 24.2 24.4 24.6 24.8 25.0 25.2 25.4 25.6 25.8 26.0 26.2 26.4 26.6 26.8
[136] 27.0 27.2 27.4 27.6 27.8 28.0 28.2 28.4 28.6 28.8 29.0 29.2 29.4 29.6 29.8
[151] 30.0

$y
  [1]  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000
  [7]  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000
 [13]  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000
 [19]  0.00000000  0.00000000  0.04221755  0.04221755  0.04221755  0.04221755
 [25]  0.04221755  0.04221755  0.04221755  0.04221755  0.04221755  0.04221755
 [31]  0.04221755  0.04221755  0.04221755  0.04221755  0.04221755  0.14906923
 [37]  0.14906923  0.14906923  0.14906923  0.14906923  0.27270207  0.27270207
 [43]  0.27270207  0.27270207  0.27270207 -0.71773886 -0.71773886 -0.71773886
 [49] -0.71773886 -0.71773886  1.20958732  1.20958732  1.20958732  1.20958732
 [55]  1.20958732  0.77208198  0.77208198  0.77208198  0.77208198  0.77208198
 [61] -2.77084870 -2.77084870 -2.77084870 -2.77084870 -2.77084870 -2.25137642
 [67] -2.25137642 -2.25137642 -2.25137642 -2.25137642  3.79744226  3.79744226
 [73]  3.79744226  3.79744226  3.79744226  0.88118113  0.88118113  0.88118113
 [79]  0.88118113  0.88118113 -1.01167821 -1.01167821 -1.01167821 -1.01167821
 [85] -1.01167821 -3.48663105 -3.48663105 -3.48663105 -3.48663105 -3.48663105
 [91]  3.43393302  3.43393302  3.43393302  3.43393302  3.43393302  2.40584226
 [97]  2.40584226  2.40584226  2.40584226  2.40584226 -0.35305733 -0.35305733
[103] -0.35305733 -0.35305733 -0.35305733 -0.35305733 -0.35305733 -0.35305733
[109] -0.35305733 -0.35305733 -0.35252361 -0.35252361 -0.35252361 -0.35252361
[115] -0.35252361 -3.02315325 -3.02315325 -3.02315325 -3.02315325 -3.02315325
[121]  1.27483720  1.27483720  1.27483720  1.27483720  1.27483720  1.27483720
[127]  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000
[133]  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000
[139]  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000
[145]  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000  0.00000000
[151]  0.00000000

> detach(); par(op)
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("princomp")
> ### * princomp
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: princomp
> ### Title: Principal Components Analysis
> ### Aliases: princomp princomp.formula princomp.default plot.princomp
> ###   print.princomp predict.princomp
> ### Keywords: multivariate
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## The variances of the variables in the
> ## USArrests data vary by orders of magnitude, so scaling is appropriate
> (pc.cr <- princomp(USArrests))  # inappropriate
Call:
princomp(x = USArrests)

Standard deviations:
   Comp.1    Comp.2    Comp.3    Comp.4 
82.890847 14.069560  6.424204  2.457837 

 4  variables and  50 observations.
> princomp(USArrests, cor = TRUE) # =^= prcomp(USArrests, scale=TRUE)
Call:
princomp(x = USArrests, cor = TRUE)

Standard deviations:
   Comp.1    Comp.2    Comp.3    Comp.4 
1.5748783 0.9948694 0.5971291 0.4164494 

 4  variables and  50 observations.
> ## Similar, but different:
> ## The standard deviations differ by a factor of sqrt(49/50)
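> ## Hedged numerical check of that factor, using the covariance-based
> ## fits (princomp() divides by n = 50, prcomp() by n - 1 = 49):
> ## Not run: 
> ##D stopifnot(all.equal(unname(princomp(USArrests)$sdev),
> ##D                     prcomp(USArrests)$sdev * sqrt(49/50)))
> ## End(Not run)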
> 
> summary(pc.cr <- princomp(USArrests, cor = TRUE))
Importance of components:
                          Comp.1    Comp.2    Comp.3     Comp.4
Standard deviation     1.5748783 0.9948694 0.5971291 0.41644938
Proportion of Variance 0.6200604 0.2474413 0.0891408 0.04335752
Cumulative Proportion  0.6200604 0.8675017 0.9566425 1.00000000
> loadings(pc.cr)  # note that blank entries are small but not zero

Loadings:
         Comp.1 Comp.2 Comp.3 Comp.4
Murder    0.536  0.418  0.341  0.649
Assault   0.583  0.188  0.268 -0.743
UrbanPop  0.278 -0.873  0.378  0.134
Rape      0.543 -0.167 -0.818       

               Comp.1 Comp.2 Comp.3 Comp.4
SS loadings      1.00   1.00   1.00   1.00
Proportion Var   0.25   0.25   0.25   0.25
Cumulative Var   0.25   0.50   0.75   1.00
> ## The signs of the columns of the loadings are arbitrary
> plot(pc.cr) # shows a screeplot.
> biplot(pc.cr)
> 
> ## Formula interface
> princomp(~ ., data = USArrests, cor = TRUE)
Call:
princomp(formula = ~., data = USArrests, cor = TRUE)

Standard deviations:
   Comp.1    Comp.2    Comp.3    Comp.4 
1.5748783 0.9948694 0.5971291 0.4164494 

 4  variables and  50 observations.
> 
> ## NA-handling
> USArrests[1, 2] <- NA
> pc.cr <- princomp(~ Murder + Assault + UrbanPop,
+                   data = USArrests, na.action = na.exclude, cor = TRUE)
> 
> ## (Simple) Robust PCA:
> ## Classical:
> (pc.cl  <- princomp(stackloss))
Call:
princomp(x = stackloss)

Standard deviations:
   Comp.1    Comp.2    Comp.3    Comp.4 
13.596589  4.676077  2.617533  1.366320 

 4  variables and  21 observations.
> 
> 
> cleanEx()
> nameEx("print.power.htest")
> ### * print.power.htest
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: print.power.htest
> ### Title: Print Methods for Hypothesis Tests and Power Calculation Objects
> ### Aliases: print.htest print.power.htest
> ### Keywords: htest
> 
> ### ** Examples
> 
> (ptt <- power.t.test(n = 20, delta = 1))

     Two-sample t test power calculation 

              n = 20
          delta = 1
             sd = 1
      sig.level = 0.05
          power = 0.8689528
    alternative = two.sided

NOTE: n is number in *each* group

> print(ptt, digits =  4) # using fewer digits than the default

     Two-sample t test power calculation 

              n = 20
          delta = 1
             sd = 1
      sig.level = 0.05
          power = 0.869
    alternative = two.sided

NOTE: n is number in *each* group

> print(ptt, digits = 12) # using more digits than the default

     Two-sample t test power calculation 

              n = 20
          delta = 1
             sd = 1
      sig.level = 0.05
          power = 0.868952801692
    alternative = two.sided

NOTE: n is number in *each* group

> 
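> ## Hedged companion use: the same print method is used when
> ## power.t.test() solves for another member, e.g. n for 90% power:
> ## Not run: 
> ##D power.t.test(power = 0.9, delta = 1)
> ## End(Not run)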
> 
> 
> cleanEx()
> nameEx("print.ts")
> ### * print.ts
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: print.ts
> ### Title: Printing and Formatting of Time-Series Objects
> ### Aliases: .preformat.ts print.ts
> ### Keywords: ts
> 
> ### ** Examples
> 
> print(ts(1:10, frequency = 7, start = c(12, 2)), calendar = TRUE)
   p1 p2 p3 p4 p5 p6 p7
12     1  2  3  4  5  6
13  7  8  9 10         
> 
> print(sunsp.1 <- window(sunspot.m2014, end=c(1756, 12)))
       Jan   Feb   Mar   Apr   May   Jun   Jul   Aug   Sep   Oct   Nov   Dec
1749  58.0  62.6  70.0  55.7  85.0  83.5  94.8  66.3  75.9  75.5 158.6  85.2
1750  73.3  75.9  89.2  88.3  90.0 100.0  85.4 103.0  91.2  65.7  63.3  75.4
1751  70.0  43.5  45.3  56.4  60.7  50.7  66.3  59.8  23.5  23.2  28.5  44.0
1752  35.0  50.0  71.0  59.3  59.7  39.6  78.4  29.3  27.1  46.6  37.6  40.0
1753  44.0  32.0  45.7  38.0  36.0  31.7  22.0  39.0  28.0  25.0  20.0   6.7
1754   0.0   3.0   1.7  13.7  20.7  26.7  18.8  12.3   8.2  24.1  13.2   4.2
1755  10.2  11.2   6.8   6.5   0.0   0.0   8.6   3.2  17.8  23.7   6.8  20.0
1756  12.5   7.1   5.4   9.4  12.5  12.9   3.6   6.4  11.8  14.3  17.0   9.4
> m <- .preformat.ts(sunsp.1) # a character matrix
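> ## Hedged peek (assuming the matrix layout mirrors the calendar
> ## print above): rows are years, columns the month abbreviations:
> ## Not run: 
> ##D dim(m)            # 8 12
> ##D dimnames(m)[[2]]  # "Jan" ... "Dec"
> ## End(Not run)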
> 
> 
> 
> cleanEx()
> nameEx("printCoefmat")
> ### * printCoefmat
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: printCoefmat
> ### Title: Print Coefficient Matrices
> ### Aliases: printCoefmat
> ### Keywords: print
> 
> ### ** Examples
> 
> cmat <- cbind(rnorm(3, 10), sqrt(rchisq(3, 12)))
> cmat <- cbind(cmat, cmat[, 1]/cmat[, 2])
> cmat <- cbind(cmat, 2*pnorm(-cmat[, 3]))
> colnames(cmat) <- c("Estimate", "Std.Err", "Z value", "Pr(>z)")
> printCoefmat(cmat[, 1:3])
     Estimate Std.Err Z value
[1,]   9.3735  4.4447  2.1089
[2,]  10.1836  3.5496  2.8689
[3,]   9.1644  2.7365  3.3490
> printCoefmat(cmat)
     Estimate Std.Err Z value    Pr(>z)    
[1,]   9.3735  4.4447  2.1089 0.0349492 *  
[2,]  10.1836  3.5496  2.8689 0.0041185 ** 
[3,]   9.1644  2.7365  3.3490 0.0008111 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> op <- options(show.coef.Pvalues = FALSE)
> printCoefmat(cmat, digits = 2)
     Estimate Std.Err Z value
[1,]      9.4     4.4     2.1
[2,]     10.2     3.5     2.9
[3,]      9.2     2.7     3.3
> printCoefmat(cmat, digits = 2, P.values = TRUE)
     Estimate Std.Err Z value Pr(>z)    
[1,]      9.4     4.4     2.1  0.035 *  
[2,]     10.2     3.5     2.9  0.004 ** 
[3,]      9.2     2.7     3.3  8e-04 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> options(op) # restore
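> ## Hedged further use: printCoefmat() also prints the coefficient
> ## tables of fitted models, e.g.
> ## Not run: 
> ##D printCoefmat(coef(summary(lm(dist ~ speed, data = cars))))
> ## End(Not run)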
> 
> 
> 
> cleanEx()
> nameEx("profile.glm")
> ### * profile.glm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: profile.glm
> ### Title: Method for Profiling 'glm' Objects
> ### Aliases: profile.glm
> ### Keywords: regression models
> 
> ### ** Examples
> 
> options(contrasts = c("contr.treatment", "contr.poly"))
> ldose <- rep(0:5, 2)
> numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16)
> sex <- factor(rep(c("M", "F"), c(6, 6)))
> SF <- cbind(numdead, numalive = 20 - numdead)
> budworm.lg <- glm(SF ~ sex*ldose, family = binomial)
> pr1 <- profile(budworm.lg)
> plot(pr1)
> pairs(pr1)
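> ## Hedged addition: profile objects are the basis of likelihood
> ## confidence intervals, so these can be computed from pr1 directly:
> ## Not run: 
> ##D confint(pr1)
> ## End(Not run)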
> 
> 
> 
> base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly"))
> cleanEx()
> nameEx("profile.nls")
> ### * profile.nls
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: profile.nls
> ### Title: Method for Profiling 'nls' Objects
> ### Aliases: profile.nls
> ### Keywords: nonlinear regression models
> 
> ### ** Examples
> 
> ## Don't show: 
> od <- options(digits = 4)
> ## End(Don't show)
> # obtain the fitted object
> fm1 <- nls(demand ~ SSasympOrig(Time, A, lrc), data = BOD)
> # get the profile for the fitted model: default level is too extreme
> pr1 <- profile(fm1, alphamax = 0.05)
> # profiled values for the two parameters
> ## IGNORE_RDIFF_BEGIN
> pr1$A
       tau par.vals.A par.vals.lrc
1  -3.0873   13.59751      0.14033
2  -2.4957   14.46884      0.01607
3  -1.8906   15.41836     -0.11787
4  -1.2634   16.49088     -0.26877
5  -0.6080   17.75662     -0.44553
6   0.0000   19.14258     -0.63282
7   0.4998   20.52853     -0.80750
8   0.9465   22.06851     -0.98233
9   1.3780   23.98253     -1.17103
10  1.7908   26.44552     -1.37431
11  2.1841   29.75928     -1.59599
12  2.5559   34.43804     -1.84209
13  2.9028   41.42542     -2.12176
> pr1$lrc
       tau par.vals.A par.vals.lrc
1  -2.9550  42.395076    -2.184153
2  -2.5201  33.446683    -1.836926
3  -2.0570  28.124205    -1.547326
4  -1.5710  24.656989    -1.293871
5  -1.0653  22.245847    -1.063030
6  -0.5412  20.482887    -0.845157
7   0.0000  19.142578    -0.632822
8   0.5539  18.095015    -0.420487
9   1.1022  17.273414    -0.207623
10  1.6298  16.621533     0.007939
11  2.1335  16.086803     0.234850
12  2.6052  15.638807     0.485009
13  3.0276  15.265409     0.779458
> ## IGNORE_RDIFF_END
> # see also example(plot.profile.nls)
> ## Don't show: 
> options(od)
> ## End(Don't show)
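> ## Hedged addition, as for glm above: the profile feeds confint();
> ## a level below the profiled alphamax range is safest here:
> ## Not run: 
> ##D confint(pr1, level = 0.90)  # likelihood intervals for A and lrc
> ## End(Not run)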
> 
> 
> 
> cleanEx()
> nameEx("proj")
> ### * proj
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: proj
> ### Title: Projections of Models
> ### Aliases: proj proj.default proj.lm proj.aov proj.aovlist
> ### Keywords: models
> 
> ### ** Examples
> 
> N <- c(0,1,0,1,1,1,0,0,0,1,1,0,1,1,0,0,1,0,1,0,1,1,0,0)
> P <- c(1,1,0,0,0,1,0,1,1,1,0,0,0,1,0,1,1,0,0,1,0,1,1,0)
> K <- c(1,0,0,1,0,1,1,0,0,1,0,1,0,1,1,0,0,0,1,1,1,0,1,0)
> yield <- c(49.5,62.8,46.8,57.0,59.8,58.5,55.5,56.0,62.8,55.8,69.5,
+ 55.0, 62.0,48.8,45.5,44.2,52.0,51.5,49.8,48.8,57.2,59.0,53.2,56.0)
> 
> npk <- data.frame(block = gl(6,4), N = factor(N), P = factor(P),
+                   K = factor(K), yield = yield)
> npk.aov <- aov(yield ~ block + N*P*K, npk)
> proj(npk.aov)
   (Intercept)  block         N          P         K        N:P    N:K
1       54.875 -0.850 -2.808333 -0.5916667 -1.991667  0.9416667  1.175
2       54.875 -0.850  2.808333 -0.5916667  1.991667 -0.9416667  1.175
3       54.875 -0.850 -2.808333  0.5916667  1.991667 -0.9416667 -1.175
4       54.875 -0.850  2.808333  0.5916667 -1.991667  0.9416667 -1.175
5       54.875  2.575  2.808333  0.5916667  1.991667  0.9416667  1.175
6       54.875  2.575  2.808333 -0.5916667 -1.991667 -0.9416667 -1.175
7       54.875  2.575 -2.808333  0.5916667 -1.991667 -0.9416667  1.175
8       54.875  2.575 -2.808333 -0.5916667  1.991667  0.9416667 -1.175
9       54.875  5.900 -2.808333 -0.5916667  1.991667  0.9416667 -1.175
10      54.875  5.900  2.808333 -0.5916667 -1.991667 -0.9416667 -1.175
11      54.875  5.900  2.808333  0.5916667  1.991667  0.9416667  1.175
12      54.875  5.900 -2.808333  0.5916667 -1.991667 -0.9416667  1.175
13      54.875 -4.750  2.808333  0.5916667  1.991667  0.9416667  1.175
14      54.875 -4.750  2.808333 -0.5916667 -1.991667 -0.9416667 -1.175
15      54.875 -4.750 -2.808333  0.5916667 -1.991667 -0.9416667  1.175
16      54.875 -4.750 -2.808333 -0.5916667  1.991667  0.9416667 -1.175
17      54.875 -4.350  2.808333 -0.5916667  1.991667 -0.9416667  1.175
18      54.875 -4.350 -2.808333  0.5916667  1.991667 -0.9416667 -1.175
19      54.875 -4.350  2.808333  0.5916667 -1.991667  0.9416667 -1.175
20      54.875 -4.350 -2.808333 -0.5916667 -1.991667  0.9416667  1.175
21      54.875  1.475  2.808333  0.5916667 -1.991667  0.9416667 -1.175
22      54.875  1.475  2.808333 -0.5916667  1.991667 -0.9416667  1.175
23      54.875  1.475 -2.808333 -0.5916667 -1.991667  0.9416667  1.175
24      54.875  1.475 -2.808333  0.5916667  1.991667 -0.9416667 -1.175
          P:K   Residuals
1   0.1416667 -1.39166667
2  -0.1416667  4.47500000
3   0.1416667 -5.02500000
4  -0.1416667  1.94166667
5   0.1416667 -5.30000000
6   0.1416667  2.80000000
7  -0.1416667  2.16666667
8  -0.1416667  0.33333333
9  -0.1416667  3.80833333
10  0.1416667 -3.22500000
11  0.1416667  1.07500000
12 -0.1416667 -1.65833333
13  0.1416667  4.22500000
14  0.1416667  0.42500000
15 -0.1416667 -0.50833333
16 -0.1416667 -4.14166667
17 -0.1416667 -2.82500000
18  0.1416667  3.17500000
19 -0.1416667 -1.75833333
20  0.1416667  1.40833333
21 -0.1416667 -0.18333333
22 -0.1416667 -1.65000000
23  0.1416667 -0.01666667
24  0.1416667  1.85000000
attr(,"df")
(Intercept)       block           N           P           K         N:P 
          1           5           1           1           1           1 
        N:K         P:K   Residuals 
          1           1          12 
attr(,"formula")
yield ~ block + N * P * K
attr(,"onedf")
[1] FALSE
attr(,"factors")
attr(,"factors")$`(Intercept)`
[1] "(Intercept)"

attr(,"factors")$block
[1] "block"

attr(,"factors")$N
[1] "N"

attr(,"factors")$P
[1] "P"

attr(,"factors")$K
[1] "K"

attr(,"factors")$`N:P`
[1] "N" "P"

attr(,"factors")$`N:K`
[1] "N" "K"

attr(,"factors")$`P:K`
[1] "P" "K"

attr(,"factors")$Residuals
[1] "block"  "N"      "P"      "K"      "Within"

attr(,"call")
aov(formula = yield ~ block + N * P * K, data = npk)
attr(,"t.factor")
      block N P K N:P N:K P:K N:P:K
yield     0 0 0 0   0   0   0     0
block     1 0 0 0   0   0   0     0
N         0 1 0 0   1   1   0     1
P         0 0 1 0   1   0   1     1
K         0 0 0 1   0   1   1     1
attr(,"class")
[1] "aovproj"
> 
> ## as a test, not particularly sensible
> options(contrasts = c("contr.helmert", "contr.treatment"))
> npk.aovE <- aov(yield ~  N*P*K + Error(block), npk)
> proj(npk.aovE)
(Intercept) :
   (Intercept)
1       54.875
2       54.875
3       54.875
4       54.875
5       54.875
6       54.875
7       54.875
8       54.875
9       54.875
10      54.875
11      54.875
12      54.875
13      54.875
14      54.875
15      54.875
16      54.875
17      54.875
18      54.875
19      54.875
20      54.875
21      54.875
22      54.875
23      54.875
24      54.875
attr(,"df")
attr(,"df")$df
(Intercept) 
          1 

attr(,"onedf")
attr(,"onedf")$onedf
[1] FALSE

attr(,"factors")
attr(,"factors")$`(Intercept)`
[1] "(Intercept)"


block :
       N:P:K  Residuals
1  -1.241667  0.3916667
2  -1.241667  0.3916667
3  -1.241667  0.3916667
4  -1.241667  0.3916667
5   1.241667  1.3333333
6   1.241667  1.3333333
7   1.241667  1.3333333
8   1.241667  1.3333333
9   1.241667  4.6583333
10  1.241667  4.6583333
11  1.241667  4.6583333
12  1.241667  4.6583333
13  1.241667 -5.9916667
14  1.241667 -5.9916667
15  1.241667 -5.9916667
16  1.241667 -5.9916667
17 -1.241667 -3.1083333
18 -1.241667 -3.1083333
19 -1.241667 -3.1083333
20 -1.241667 -3.1083333
21 -1.241667  2.7166667
22 -1.241667  2.7166667
23 -1.241667  2.7166667
24 -1.241667  2.7166667
attr(,"df")
attr(,"df")$df
    N:P:K Residuals 
        1         4 

attr(,"onedf")
attr(,"onedf")$onedf
[1] FALSE

attr(,"factors")
attr(,"factors")$`N:P:K`
[1] "N" "P" "K"

attr(,"factors")$Residuals
[1] "block"


Within :
           N          P         K        N:P    N:K        P:K   Residuals
1  -2.808333 -0.5916667 -1.991667  0.9416667  1.175  0.1416667 -1.39166667
2   2.808333 -0.5916667  1.991667 -0.9416667  1.175 -0.1416667  4.47500000
3  -2.808333  0.5916667  1.991667 -0.9416667 -1.175  0.1416667 -5.02500000
4   2.808333  0.5916667 -1.991667  0.9416667 -1.175 -0.1416667  1.94166667
5   2.808333  0.5916667  1.991667  0.9416667  1.175  0.1416667 -5.30000000
6   2.808333 -0.5916667 -1.991667 -0.9416667 -1.175  0.1416667  2.80000000
7  -2.808333  0.5916667 -1.991667 -0.9416667  1.175 -0.1416667  2.16666667
8  -2.808333 -0.5916667  1.991667  0.9416667 -1.175 -0.1416667  0.33333333
9  -2.808333 -0.5916667  1.991667  0.9416667 -1.175 -0.1416667  3.80833333
10  2.808333 -0.5916667 -1.991667 -0.9416667 -1.175  0.1416667 -3.22500000
11  2.808333  0.5916667  1.991667  0.9416667  1.175  0.1416667  1.07500000
12 -2.808333  0.5916667 -1.991667 -0.9416667  1.175 -0.1416667 -1.65833333
13  2.808333  0.5916667  1.991667  0.9416667  1.175  0.1416667  4.22500000
14  2.808333 -0.5916667 -1.991667 -0.9416667 -1.175  0.1416667  0.42500000
15 -2.808333  0.5916667 -1.991667 -0.9416667  1.175 -0.1416667 -0.50833333
16 -2.808333 -0.5916667  1.991667  0.9416667 -1.175 -0.1416667 -4.14166667
17  2.808333 -0.5916667  1.991667 -0.9416667  1.175 -0.1416667 -2.82500000
18 -2.808333  0.5916667  1.991667 -0.9416667 -1.175  0.1416667  3.17500000
19  2.808333  0.5916667 -1.991667  0.9416667 -1.175 -0.1416667 -1.75833333
20 -2.808333 -0.5916667 -1.991667  0.9416667  1.175  0.1416667  1.40833333
21  2.808333  0.5916667 -1.991667  0.9416667 -1.175 -0.1416667 -0.18333333
22  2.808333 -0.5916667  1.991667 -0.9416667  1.175 -0.1416667 -1.65000000
23 -2.808333 -0.5916667 -1.991667  0.9416667  1.175  0.1416667 -0.01666667
24 -2.808333  0.5916667  1.991667 -0.9416667 -1.175  0.1416667  1.85000000
attr(,"df")
attr(,"df")$df
        N         P         K       N:P       N:K       P:K Residuals 
        1         1         1         1         1         1        12 

attr(,"onedf")
attr(,"onedf")$onedf
[1] FALSE

attr(,"factors")
attr(,"factors")$N
[1] "N"

attr(,"factors")$P
[1] "P"

attr(,"factors")$K
[1] "K"

attr(,"factors")$`N:P`
[1] "N" "P"

attr(,"factors")$`N:K`
[1] "N" "K"

attr(,"factors")$`P:K`
[1] "P" "K"

attr(,"factors")$Residuals
[1] "block"  "Within"


> 
> 
> 
> base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly"))
> cleanEx()
> nameEx("prop.test")
> ### * prop.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: prop.test
> ### Title: Test of Equal or Given Proportions
> ### Aliases: prop.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> heads <- rbinom(1, size = 100, prob = .5)
> prop.test(heads, 100)          # continuity correction TRUE by default

	1-sample proportions test with continuity correction

data:  heads out of 100, null probability 0.5
X-squared = 0.09, df = 1, p-value = 0.7642
alternative hypothesis: true p is not equal to 0.5
95 percent confidence interval:
 0.4183183 0.6201278
sample estimates:
   p 
0.52 

> prop.test(heads, 100, correct = FALSE)

	1-sample proportions test without continuity correction

data:  heads out of 100, null probability 0.5
X-squared = 0.16, df = 1, p-value = 0.6892
alternative hypothesis: true p is not equal to 0.5
95 percent confidence interval:
 0.4231658 0.6153545
sample estimates:
   p 
0.52 

> 
> ## Data from Fleiss (1981), p. 139.
> ## H0: The null hypothesis is that the four populations from which
> ##     the patients were drawn have the same true proportion of smokers.
> ## A:  The alternative is that this proportion is different in at
> ##     least one of the populations.
> 
> smokers  <- c( 83, 90, 129, 70 )
> patients <- c( 86, 93, 136, 82 )
> prop.test(smokers, patients)

	4-sample test for equality of proportions without continuity correction

data:  smokers out of patients
X-squared = 12.6, df = 3, p-value = 0.005585
alternative hypothesis: two.sided
sample estimates:
   prop 1    prop 2    prop 3    prop 4 
0.9651163 0.9677419 0.9485294 0.8536585 

> 
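> ## Hedged equivalence: for k > 2 groups the statistic is Pearson's
> ## chi-squared on the corresponding 2 x 4 table:
> ## Not run: 
> ##D tab <- rbind(smokers, patients - smokers)
> ##D chisq.test(tab)  # X-squared = 12.6, df = 3, as above
> ## End(Not run)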
> 
> 
> cleanEx()
> nameEx("prop.trend.test")
> ### * prop.trend.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: prop.trend.test
> ### Title: Test for trend in proportions
> ### Aliases: prop.trend.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> smokers  <- c( 83, 90, 129, 70 )
> patients <- c( 86, 93, 136, 82 )
> prop.test(smokers, patients)

	4-sample test for equality of proportions without continuity correction

data:  smokers out of patients
X-squared = 12.6, df = 3, p-value = 0.005585
alternative hypothesis: two.sided
sample estimates:
   prop 1    prop 2    prop 3    prop 4 
0.9651163 0.9677419 0.9485294 0.8536585 

> prop.trend.test(smokers, patients)

	Chi-squared Test for Trend in Proportions

data:  smokers out of patients ,
 using scores: 1 2 3 4
X-squared = 8.2249, df = 1, p-value = 0.004132

> prop.trend.test(smokers, patients, c(0,0,0,1))

	Chi-squared Test for Trend in Proportions

data:  smokers out of patients ,
 using scores: 0 0 0 1
X-squared = 12.173, df = 1, p-value = 0.0004848

> 
> 
> 
> cleanEx()
> nameEx("qqnorm")
> ### * qqnorm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: qqnorm
> ### Title: Quantile-Quantile Plots
> ### Aliases: qqnorm qqnorm.default qqplot qqline
> ### Keywords: hplot distribution
> 
> ### ** Examples
> 
> require(graphics)
> 
> y <- rt(200, df = 5)
> qqnorm(y); qqline(y, col = 2)
> qqplot(y, rt(300, df = 5))
> 
> qqnorm(precip, ylab = "Precipitation [in/yr] for 70 US cities")
> 
> ## "QQ-Chisquare" : --------------------------
> y <- rchisq(500, df = 3)
> ## Q-Q plot for Chi^2 data against true theoretical distribution:
> qqplot(qchisq(ppoints(500), df = 3), y,
+        main = expression("Q-Q plot for" ~~ {chi^2}[nu == 3]))
> qqline(y, distribution = function(p) qchisq(p, df = 3),
+        probs = c(0.1, 0.6), col = 2)
> mtext("qqline(*, dist = qchisq(., df=3), prob = c(0.1, 0.6))")
> ## (Note that the above uses ppoints() with a = 1/2, giving the
> ## probability points for quantile type 5: so theoretically, using
> ## qqline(qtype = 5) might be preferable.) 
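> ## Hedged illustration of that note:
> ## Not run: 
> ##D qqline(y, distribution = function(p) qchisq(p, df = 3),
> ##D        probs = c(0.1, 0.6), qtype = 5, col = 3)
> ## End(Not run)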
> 
> ## Figure 1 in Switzer (1976), knee angle data
> switzer <- data.frame(
+     angle = c(-31, -30, -25, -25, -23, -23, -22, -20, -20, -18,
+               -18, -18, -16, -15, -15, -14, -13, -11, -10, - 9,
+               - 8, - 7, - 7, - 7, - 6, - 6, - 4, - 4, - 3, - 2,
+               - 2, - 1,   1,   1,   4,   5,  11,  12,  16,  34,
+               -31, -20, -18, -16, -16, -16, -15, -14, -14, -14,
+               -14, -13, -13, -11, -11, -10, - 9, - 9, - 8, - 7,
+               - 7, - 6, - 6,  -5, - 5, - 5, - 4, - 2, - 2, - 2,
+                 0,   0,   1,   1,   2,   4,   5,   5,   6,  17),
+     sex = gl(2, 40, labels = c("Female", "Male")))
> 
> ks.test(angle ~ sex, data = switzer)

	Exact two-sample Kolmogorov-Smirnov test

data:  angle by sex
D = 0.225, p-value = 0.212
alternative hypothesis: two-sided

> d <- with(switzer, split(angle, sex))
> with(d, qqplot(Female, Male, pch = 19, xlim = c(-31, 31), ylim = c(-31, 31),
+                conf.level = 0.945, 
+                conf.args = list(col = "lightgrey", exact = TRUE))
+ )
> abline(a = 0, b = 1)
> 
> ## agreement with ks.test
> set.seed(1)
> x <- rnorm(50)
> y <- rnorm(50, mean = .5, sd = .95)
> ex <- TRUE
> ### p = 0.112
> (pval <- ks.test(x, y, exact = ex)$p.value)
[1] 0.1123852
> ## 88.8% confidence band with bisecting line
> ## touching the lower bound
> qqplot(x, y, pch = 19, conf.level = 1 - pval, 
+        conf.args = list(exact = ex, col = "lightgrey"))
> abline(a = 0, b = 1)
> 
> 
> 
> 
> cleanEx()
> nameEx("quade.test")
> ### * quade.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: quade.test
> ### Title: Quade Test
> ### Aliases: quade.test quade.test.default quade.test.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Conover (1999, p. 375f):
> ## Numbers of five brands of a new hand lotion sold in seven stores
> ## during one week.
> y <- matrix(c( 5,  4,  7, 10, 12,
+                1,  3,  1,  0,  2,
+               16, 12, 22, 22, 35,
+                5,  4,  3,  5,  4,
+               10,  9,  7, 13, 10,
+               19, 18, 28, 37, 58,
+               10,  7,  6,  8,  7),
+             nrow = 7, byrow = TRUE,
+             dimnames =
+             list(Store = as.character(1:7),
+                  Brand = LETTERS[1:5]))
> y
     Brand
Store  A  B  C  D  E
    1  5  4  7 10 12
    2  1  3  1  0  2
    3 16 12 22 22 35
    4  5  4  3  5  4
    5 10  9  7 13 10
    6 19 18 28 37 58
    7 10  7  6  8  7
> (qTst <- quade.test(y))

	Quade test

data:  y
Quade F = 3.8293, num df = 4, denom df = 24, p-value = 0.01519

> 
> ## Show equivalence of different versions of the test:
> utils::str(dy <- as.data.frame(as.table(y)))
'data.frame':	35 obs. of  3 variables:
 $ Store: Factor w/ 7 levels "1","2","3","4",..: 1 2 3 4 5 6 7 1 2 3 ...
 $ Brand: Factor w/ 5 levels "A","B","C","D",..: 1 1 1 1 1 1 1 2 2 2 ...
 $ Freq : num  5 1 16 5 10 19 10 4 3 12 ...
> qT. <- quade.test(Freq ~ Brand|Store, data = dy)
> qT.$data.name <- qTst$data.name
> stopifnot(all.equal(qTst, qT., tolerance = 1e-15))
> dys <- dy[order(dy[,"Freq"]),]
> qTs <- quade.test(Freq ~ Brand|Store, data = dys)
> qTs$data.name <- qTst$data.name
> stopifnot(all.equal(qTst, qTs, tolerance = 1e-15))
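> ## Hedged comparison (a classical alternative, not part of the
> ## original example): the Friedman test on the same blocked layout:
> ## Not run: 
> ##D friedman.test(y)
> ## End(Not run)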
> 
> 
> 
> cleanEx()
> nameEx("quantile")
> ### * quantile
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: quantile
> ### Title: Sample Quantiles
> ### Aliases: quantile quantile.default
> ### Keywords: univar
> 
> ### ** Examples
> 
> quantile(x <- rnorm(1001)) # Extremes & Quartiles by default
         0%         25%         50%         75%        100% 
-3.00804860 -0.69731820 -0.03472603  0.68924373  3.81027668 
> quantile(x,  probs = c(0.1, 0.5, 1, 2, 5, 10, 50, NA)/100)
       0.1%        0.5%          1%          2%          5%         10% 
-2.99694930 -2.59232767 -2.42431731 -2.24515257 -1.72663060 -1.33880074 
        50%             
-0.03472603          NA 
> 
> ### Compare different types
> quantAll <- function(x, prob, ...)
+   t(vapply(1:9, function(typ) quantile(x, probs = prob, type = typ, ...),
+            quantile(x, prob, type=1, ...)))
> p <- c(0.1, 0.5, 1, 2, 5, 10, 50)/100
> signif(quantAll(x, p), 4)
        0.1%   0.5%     1%     2%     5%    10%      50%
 [1,] -2.997 -2.592 -2.424 -2.245 -1.727 -1.339 -0.03473
 [2,] -2.997 -2.592 -2.424 -2.245 -1.727 -1.339 -0.03473
 [3,] -3.008 -2.596 -2.433 -2.265 -1.733 -1.344 -0.03592
 [4,] -3.008 -2.596 -2.433 -2.264 -1.733 -1.344 -0.03532
 [5,] -3.002 -2.594 -2.428 -2.255 -1.730 -1.341 -0.03473
 [6,] -3.008 -2.596 -2.432 -2.264 -1.733 -1.343 -0.03473
 [7,] -2.997 -2.592 -2.424 -2.245 -1.727 -1.339 -0.03473
 [8,] -3.004 -2.595 -2.430 -2.258 -1.731 -1.342 -0.03473
 [9,] -3.004 -2.595 -2.429 -2.257 -1.730 -1.341 -0.03473
> 
> ## 0% and 100% are equal to min(), max() for all types:
> stopifnot(t(quantAll(x, prob=0:1)) == range(x))
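> ## Hedged note: the default is type = 7, so row 7 above equals the
> ## plain quantile() call:
> ## Not run: 
> ##D stopifnot(all.equal(quantAll(x, p)[7, ], quantile(x, p)))
> ## End(Not run)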
> 
> ## for complex numbers:
> z <- complex(real = x, imaginary = -10*x)
> signif(quantAll(z, p), 4)
              0.1%         0.5%           1%           2%           5%
 [1,] -3.00+29.97i -2.59+25.92i -2.42+24.24i -2.25+22.45i -1.73+17.27i
 [2,] -3.00+29.97i -2.59+25.92i -2.42+24.24i -2.25+22.45i -1.73+17.27i
 [3,] -3.01+30.08i -2.60+25.96i -2.43+24.33i -2.26+22.65i -1.73+17.33i
 [4,] -3.01+30.08i -2.60+25.96i -2.43+24.33i -2.26+22.64i -1.73+17.33i
 [5,] -3.00+30.02i -2.59+25.94i -2.43+24.28i -2.25+22.55i -1.73+17.30i
 [6,] -3.01+30.08i -2.60+25.96i -2.43+24.32i -2.26+22.64i -1.73+17.33i
 [7,] -3.00+29.97i -2.59+25.92i -2.42+24.24i -2.25+22.45i -1.73+17.27i
 [8,] -3.00+30.04i -2.59+25.95i -2.43+24.30i -2.26+22.58i -1.73+17.31i
 [9,] -3.00+30.04i -2.59+25.95i -2.43+24.29i -2.26+22.57i -1.73+17.30i
               10%             50%
 [1,] -1.34+13.39i -0.0347+0.3473i
 [2,] -1.34+13.39i -0.0347+0.3473i
 [3,] -1.34+13.44i -0.0359+0.3592i
 [4,] -1.34+13.44i -0.0353+0.3532i
 [5,] -1.34+13.41i -0.0347+0.3473i
 [6,] -1.34+13.43i -0.0347+0.3473i
 [7,] -1.34+13.39i -0.0347+0.3473i
 [8,] -1.34+13.42i -0.0347+0.3473i
 [9,] -1.34+13.41i -0.0347+0.3473i
> 
> 
> 
> cleanEx()
> nameEx("r2dtable")
> ### * r2dtable
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: r2dtable
> ### Title: Random 2-way Tables with Given Marginals
> ### Aliases: r2dtable
> ### Keywords: distribution
> 
> ### ** Examples
> 
> ## Fisher's Tea Drinker data.
> TeaTasting <-
+ matrix(c(3, 1, 1, 3),
+        nrow = 2,
+        dimnames = list(Guess = c("Milk", "Tea"),
+                        Truth = c("Milk", "Tea")))
> ## Simulate permutation test for independence based on the maximum
> ## Pearson residuals (rather than their sum).
> rowTotals <- rowSums(TeaTasting)
> colTotals <- colSums(TeaTasting)
> nOfCases <- sum(rowTotals)
> expected <- outer(rowTotals, colTotals) / nOfCases
> maxSqResid <- function(x) max((x - expected) ^ 2 / expected)
> simMaxSqResid <-
+     sapply(r2dtable(1000, rowTotals, colTotals), maxSqResid)
> sum(simMaxSqResid >= maxSqResid(TeaTasting)) / 1000
[1] 0.465
> ## Fisher's exact test gives p = 0.4857 ...
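> ## Hedged sanity check: every simulated table keeps the margins:
> ## Not run: 
> ##D tabs <- r2dtable(1000, rowTotals, colTotals)
> ##D stopifnot(sapply(tabs, function(tb)
> ##D     all(rowSums(tb) == rowTotals, colSums(tb) == colTotals)))
> ## End(Not run)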
> 
> 
> 
> cleanEx()
> nameEx("rWishart")
> ### * rWishart
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: rWishart
> ### Title: Random Wishart Distributed Matrices
> ### Aliases: rWishart
> ### Keywords: multivariate
> 
> ### ** Examples
> 
> ## Artificial
> S <- toeplitz((10:1)/10)
> set.seed(11)
> R <- rWishart(1000, 20, S)
> dim(R)  #  10 10  1000
[1]   10   10 1000
> mR <- apply(R, 1:2, mean)  # ~= E[ Wish(S, 20) ] = 20 * S
> stopifnot(all.equal(mR, 20*S, tolerance = .009))
> 
> ## See Details, the variance is
> Va <- 20*(S^2 + tcrossprod(diag(S)))
> vR <- apply(R, 1:2, var)
> stopifnot(all.equal(vR, Va, tolerance = 1/16))
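> ## Hedged extra check: each slice should be symmetric positive
> ## definite (df = 20 >= p = 10, so a.s. nonsingular):
> ## Not run: 
> ##D stopifnot(isSymmetric(R[, , 1]),
> ##D           all(eigen(R[, , 1], symmetric = TRUE)$values > 0))
> ## End(Not run)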
> 
> 
> 
> cleanEx()
> nameEx("read.ftable")
> ### * read.ftable
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: read.ftable
> ### Title: Manipulate Flat Contingency Tables
> ### Aliases: read.ftable write.ftable format.ftable print.ftable
> ### Keywords: category
> 
> ### ** Examples
> 
> ## Agresti (1990), page 157, Table 5.8.
> ## Not in ftable standard format, but o.k.
> file <- tempfile()
> cat("             Intercourse\n",
+     "Race  Gender     Yes  No\n",
+     "White Male        43 134\n",
+     "      Female      26 149\n",
+     "Black Male        29  23\n",
+     "      Female      22  36\n",
+     file = file)
> ft1 <- read.ftable(file)
> ft1
             Intercourse Yes  No
Race  Gender                    
White Male                43 134
      Female              26 149
Black Male                29  23
      Female              22  36
> unlink(file)
> 
> ## Agresti (1990), page 297, Table 8.16.
> ## Almost o.k., but misses the name of the row variable.
> file <- tempfile()
> cat("                      \"Tonsil Size\"\n",
+     "            \"Not Enl.\" \"Enl.\" \"Greatly Enl.\"\n",
+     "Noncarriers       497     560           269\n",
+     "Carriers           19      29            24\n",
+     file = file)
> ft <- read.ftable(file, skip = 2,
+                   row.var.names = "Status",
+                   col.vars = list("Tonsil Size" =
+                       c("Not Enl.", "Enl.", "Greatly Enl.")))
> ft
            Tonsil Size Not Enl. Enl. Greatly Enl.
Status                                            
Noncarriers                  497  560          269
Carriers                      19   29           24
> unlink(file)
> 
> ft22 <- ftable(Titanic, row.vars = 2:1, col.vars = 4:3)
> write.ftable(ft22, quote = FALSE) # is the same as
             Survived    No         Yes      
             Age      Child Adult Child Adult
Sex    Class                                 
Male   1st                0   118     5    57
       2nd                0   154    11    14
       3rd               35   387    13    75
       Crew               0   670     0   192
Female 1st                0     4     1   140
       2nd                0    13    13    80
       3rd               17    89    14    76
       Crew               0     3     0    20
> print(ft22)  # method = "non.compact" is the default
             Survived    No         Yes      
             Age      Child Adult Child Adult
Sex    Class                                 
Male   1st                0   118     5    57
       2nd                0   154    11    14
       3rd               35   387    13    75
       Crew               0   670     0   192
Female 1st                0     4     1   140
       2nd                0    13    13    80
       3rd               17    89    14    76
       Crew               0     3     0    20
> print(ft22, method="row.compact")
             Survived    No         Yes      
Sex    Class Age      Child Adult Child Adult
Male   1st                0   118     5    57
       2nd                0   154    11    14
       3rd               35   387    13    75
       Crew               0   670     0   192
Female 1st                0     4     1   140
       2nd                0    13    13    80
       3rd               17    89    14    76
       Crew               0     3     0    20
> print(ft22, method="col.compact")
       Survived    No         Yes      
       Age      Child Adult Child Adult
Sex    Class                           
Male   1st          0   118     5    57
       2nd          0   154    11    14
       3rd         35   387    13    75
       Crew         0   670     0   192
Female 1st          0     4     1   140
       2nd          0    13    13    80
       3rd         17    89    14    76
       Crew         0     3     0    20
> print(ft22, method="compact")
       Survived       No         Yes      
Sex    Class | Age Child Adult Child Adult
Male   1st             0   118     5    57
       2nd             0   154    11    14
       3rd            35   387    13    75
       Crew            0   670     0   192
Female 1st             0     4     1   140
       2nd             0    13    13    80
       3rd            17    89    14    76
       Crew            0     3     0    20
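> ## Hedged round trip (file2 is a scratch path introduced here):
> ## Not run: 
> ##D file2 <- tempfile()
> ##D write.ftable(ft22, file = file2)
> ##D read.ftable(file2)
> ##D unlink(file2)
> ## End(Not run)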
> 
> ## using 'justify' and 'quote':
> format(ftable(wool + tension ~ breaks, warpbreaks),
+        justify = "none", quote = FALSE)
      [,1]     [,2]      [,3] [,4] [,5] [,6] [,7] [,8]
 [1,] ""       "wool"    "A"  ""   ""   "B"  ""   ""  
 [2,] ""       "tension" "L"  "M"  "H"  "L"  "M"  "H" 
 [3,] "breaks" ""        ""   ""   ""   ""   ""   ""  
 [4,] "10"     ""        "0"  "0"  "1"  "0"  "0"  "0" 
 [5,] "12"     ""        "0"  "1"  "0"  "0"  "0"  "0" 
 [6,] "13"     ""        "0"  "0"  "0"  "0"  "0"  "1" 
 [7,] "14"     ""        "0"  "0"  "0"  "1"  "0"  "0" 
 [8,] "15"     ""        "0"  "0"  "1"  "0"  "0"  "2" 
 [9,] "16"     ""        "0"  "0"  "0"  "0"  "1"  "1" 
[10,] "17"     ""        "0"  "1"  "0"  "0"  "0"  "1" 
[11,] "18"     ""        "0"  "2"  "1"  "0"  "0"  "0" 
[12,] "19"     ""        "0"  "0"  "0"  "1"  "1"  "0" 
[13,] "20"     ""        "0"  "0"  "0"  "1"  "0"  "1" 
[14,] "21"     ""        "0"  "1"  "1"  "0"  "1"  "1" 
[15,] "24"     ""        "0"  "0"  "1"  "0"  "0"  "1" 
[16,] "25"     ""        "1"  "0"  "0"  "0"  "0"  "0" 
[17,] "26"     ""        "2"  "0"  "1"  "0"  "1"  "0" 
[18,] "27"     ""        "0"  "0"  "0"  "1"  "0"  "0" 
[19,] "28"     ""        "0"  "0"  "1"  "0"  "1"  "1" 
[20,] "29"     ""        "0"  "1"  "0"  "2"  "1"  "0" 
[21,] "30"     ""        "1"  "1"  "0"  "0"  "0"  "0" 
[22,] "31"     ""        "0"  "0"  "0"  "1"  "0"  "0" 
[23,] "35"     ""        "0"  "1"  "0"  "0"  "0"  "0" 
[24,] "36"     ""        "0"  "1"  "1"  "0"  "0"  "0" 
[25,] "39"     ""        "0"  "0"  "0"  "0"  "2"  "0" 
[26,] "41"     ""        "0"  "0"  "0"  "1"  "0"  "0" 
[27,] "42"     ""        "0"  "0"  "0"  "0"  "1"  "0" 
[28,] "43"     ""        "0"  "0"  "1"  "0"  "0"  "0" 
[29,] "44"     ""        "0"  "0"  "0"  "1"  "0"  "0" 
[30,] "51"     ""        "1"  "0"  "0"  "0"  "0"  "0" 
[31,] "52"     ""        "1"  "0"  "0"  "0"  "0"  "0" 
[32,] "54"     ""        "1"  "0"  "0"  "0"  "0"  "0" 
[33,] "67"     ""        "1"  "0"  "0"  "0"  "0"  "0" 
[34,] "70"     ""        "1"  "0"  "0"  "0"  "0"  "0" 
> ## Don't show: 
>  op <- options(warn = 2) # no warnings allowed
>  stopifnot(dim(format(ft)) == 4:5,
+ 	   dim(format(ftable(UCBAdmissions))) == c(6,9))
>  meths <- c("non.compact", "row.compact", "col.compact", "compact")
>  dimform <-
+     function(ft) sapply(meths, function(M) dim(format(ft, method = M)))
>  m.eq    <- function(M,m) all.equal(unname(M), m, tolerance = 0)
>  ## All format(..) w/o warnings:
>  stopifnot(m.eq(print(dimform(ft22)),
+ 		rbind(11:10, rep(7:6, each = 2))),
+ 	   m.eq(print(dimform(ftable(Titanic, row.vars = integer()))),
+ 		rbind(rep(6:5,2), 33)))
     non.compact row.compact col.compact compact
[1,]          11          10          11      10
[2,]           7           7           6       6
     non.compact row.compact col.compact compact
[1,]           6           5           6       5
[2,]          33          33          33      33
>  options(op)
> ## End(Don't show)
> 
> 
> 
> cleanEx()
> nameEx("rect.hclust")
> ### * rect.hclust
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: rect.hclust
> ### Title: Draw Rectangles Around Hierarchical Clusters
> ### Aliases: rect.hclust
> ### Keywords: aplot cluster
> 
> ### ** Examples
> 
> require(graphics)
> 
> hca <- hclust(dist(USArrests))
> plot(hca)
> rect.hclust(hca, k = 3, border = "red")
> x <- rect.hclust(hca, h = 50, which = c(2,7), border = 3:4)
> x
[[1]]
       Alabama         Alaska       Delaware      Louisiana    Mississippi 
             1              2              8             18             24 
South Carolina 
            40 

[[2]]
 Connecticut        Idaho      Indiana       Kansas     Kentucky      Montana 
           7           12           14           16           17           26 
    Nebraska         Ohio Pennsylvania         Utah 
          27           35           38           44 

> 
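> ## Hedged cross-check: the drawn clusters correspond to cutting the
> ## tree at the same height:
> ## Not run: 
> ##D table(cutree(hca, h = 50))  # cluster sizes at h = 50
> ## End(Not run)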
> 
> 
> cleanEx()
> nameEx("relevel")
> ### * relevel
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: relevel
> ### Title: Reorder Levels of Factor
> ### Aliases: relevel relevel.default relevel.factor relevel.ordered
> ### Keywords: utilities models
> 
> ### ** Examples
> 
> warpbreaks$tension <- relevel(warpbreaks$tension, ref = "M")
> summary(lm(breaks ~ wool + tension, data = warpbreaks))

Call:
lm(formula = breaks ~ wool + tension, data = warpbreaks)

Residuals:
    Min      1Q  Median      3Q     Max 
-19.500  -8.083  -2.139   6.472  30.722 

Coefficients:
            Estimate Std. Error t value Pr(>|t|)    
(Intercept)   29.278      3.162   9.260    2e-12 ***
woolB         -5.778      3.162  -1.827   0.0736 .  
tensionL      10.000      3.872   2.582   0.0128 *  
tensionH      -4.722      3.872  -1.219   0.2284    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 11.62 on 50 degrees of freedom
Multiple R-squared:  0.2691,	Adjusted R-squared:  0.2253 
F-statistic: 6.138 on 3 and 50 DF,  p-value: 0.00123

> 
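> ## Hedged check (not in the original example): releveling changes the
> ## parameterization, not the fit -- fitted values are unchanged:
> ## Not run: 
> ##D fitM <- lm(breaks ~ wool + tension, data = warpbreaks)   # ref = "M"
> ##D wb <- warpbreaks; wb$tension <- relevel(wb$tension, ref = "L")
> ##D fitL <- lm(breaks ~ wool + tension, data = wb)
> ##D stopifnot(all.equal(fitted(fitM), fitted(fitL)))
> ## End(Not run)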
> 
> 
> cleanEx()
> nameEx("reorder.dendrogram")
> ### * reorder.dendrogram
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: reorder.dendrogram
> ### Title: Reorder a Dendrogram
> ### Aliases: reorder.dendrogram
> ### Keywords: manip
> 
> ### ** Examples
> 
> require(graphics)
> 
> set.seed(123)
> x <- rnorm(10)
> hc <- hclust(dist(x))
> dd <- as.dendrogram(hc)
> dd.reorder <- reorder(dd, 10:1)
> plot(dd, main = "random dendrogram 'dd'")
> 
> op <- par(mfcol = 1:2)
> plot(dd.reorder, main = "reorder(dd, 10:1)")
> plot(reorder(dd, 10:1, agglo.FUN = mean), main = "reorder(dd, 10:1, mean)")
> par(op)
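> ## Hedged peek at the effect without plotting:
> ## Not run: 
> ##D order.dendrogram(dd)          # leaf order before ...
> ##D order.dendrogram(dd.reorder)  # ... and after reordering
> ## End(Not run)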
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("reorder.factor")
> ### * reorder.factor
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: reorder.default
> ### Title: Reorder Levels of a Factor
> ### Aliases: reorder reorder.default
> ### Keywords: utilities
> 
> ### ** Examples
> 
> require(graphics)
> 
> bymedian <- with(InsectSprays, reorder(spray, count, median))
> boxplot(count ~ bymedian, data = InsectSprays,
+         xlab = "Type of spray", ylab = "Insect count",
+         main = "InsectSprays data", varwidth = TRUE,
+         col = "lightgray")
> 
> bymedianR <- with(InsectSprays, reorder(spray, count, median, decreasing=TRUE))
> stopifnot(exprs = {
+     identical(attr(bymedian, "scores") -> sc,
+               attr(bymedianR,"scores"))
+     identical(nms <- names(sc), LETTERS[1:6])
+     identical(levels(bymedian ), nms[isc <- order(sc)])
+     identical(levels(bymedianR), nms[rev(isc)])
+ })
> 
> 
> 
> cleanEx()
> nameEx("replications")
> ### * replications
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: replications
> ### Title: Number of Replications of Terms
> ### Aliases: replications
> ### Keywords: models
> 
> ### ** Examples
> 
> ## From Venables and Ripley (2002) p.165.
> N <- c(0,1,0,1,1,1,0,0,0,1,1,0,1,1,0,0,1,0,1,0,1,1,0,0)
> P <- c(1,1,0,0,0,1,0,1,1,1,0,0,0,1,0,1,1,0,0,1,0,1,1,0)
> K <- c(1,0,0,1,0,1,1,0,0,1,0,1,0,1,1,0,0,0,1,1,1,0,1,0)
> yield <- c(49.5,62.8,46.8,57.0,59.8,58.5,55.5,56.0,62.8,55.8,69.5,
+ 55.0, 62.0,48.8,45.5,44.2,52.0,51.5,49.8,48.8,57.2,59.0,53.2,56.0)
> 
> npk <- data.frame(block = gl(6,4), N = factor(N), P = factor(P),
+                   K = factor(K), yield = yield)
> replications(~ . - yield, npk)
block     N     P     K 
    4    12    12    12 
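> ## Hedged extension: interaction terms are counted the same way; in
> ## this balanced design each N:P combination occurs 6 times:
> ## Not run: 
> ##D replications(~ N:P, npk)
> ## End(Not run)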
> 
> 
> 
> cleanEx()
> nameEx("reshape")
> ### * reshape
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: reshape
> ### Title: Reshape Grouped Data
> ### Aliases: reshape
> ### Keywords: manip
> 
> ### ** Examples
> 
> summary(Indometh) # data in long format
 Subject      time            conc       
 1:11    Min.   :0.250   Min.   :0.0500  
 4:11    1st Qu.:0.750   1st Qu.:0.1100  
 2:11    Median :2.000   Median :0.3400  
 5:11    Mean   :2.886   Mean   :0.5918  
 6:11    3rd Qu.:5.000   3rd Qu.:0.8325  
 3:11    Max.   :8.000   Max.   :2.7200  
> 
> ## long to wide (direction = "wide") requires idvar and timevar at a minimum
> reshape(Indometh, direction = "wide", idvar = "Subject", timevar = "time")
   Subject conc.0.25 conc.0.5 conc.0.75 conc.1 conc.1.25 conc.2 conc.3 conc.4
1        1      1.50     0.94      0.78   0.48      0.37   0.19   0.12   0.11
12       2      2.03     1.63      0.71   0.70      0.64   0.36   0.32   0.20
23       3      2.72     1.49      1.16   0.80      0.80   0.39   0.22   0.12
34       4      1.85     1.39      1.02   0.89      0.59   0.40   0.16   0.11
45       5      2.05     1.04      0.81   0.39      0.30   0.23   0.13   0.11
56       6      2.31     1.44      1.03   0.84      0.64   0.42   0.24   0.17
   conc.5 conc.6 conc.8
1    0.08   0.07   0.05
12   0.25   0.12   0.08
23   0.11   0.08   0.08
34   0.10   0.07   0.07
45   0.08   0.10   0.06
56   0.13   0.10   0.09
> 
> ## one can also explicitly specify the name of the combined variable
> wide <- reshape(Indometh, direction = "wide", idvar = "Subject",
+                 timevar = "time", v.names = "conc", sep= "_")
> wide
   Subject conc_0.25 conc_0.5 conc_0.75 conc_1 conc_1.25 conc_2 conc_3 conc_4
1        1      1.50     0.94      0.78   0.48      0.37   0.19   0.12   0.11
12       2      2.03     1.63      0.71   0.70      0.64   0.36   0.32   0.20
23       3      2.72     1.49      1.16   0.80      0.80   0.39   0.22   0.12
34       4      1.85     1.39      1.02   0.89      0.59   0.40   0.16   0.11
45       5      2.05     1.04      0.81   0.39      0.30   0.23   0.13   0.11
56       6      2.31     1.44      1.03   0.84      0.64   0.42   0.24   0.17
   conc_5 conc_6 conc_8
1    0.08   0.07   0.05
12   0.25   0.12   0.08
23   0.11   0.08   0.08
34   0.10   0.07   0.07
45   0.08   0.10   0.06
56   0.13   0.10   0.09
> 
> ## reverse transformation
> reshape(wide, direction = "long")
       Subject time conc
1.0.25       1 0.25 1.50
2.0.25       2 0.25 2.03
3.0.25       3 0.25 2.72
4.0.25       4 0.25 1.85
5.0.25       5 0.25 2.05
6.0.25       6 0.25 2.31
1.0.5        1 0.50 0.94
2.0.5        2 0.50 1.63
3.0.5        3 0.50 1.49
4.0.5        4 0.50 1.39
5.0.5        5 0.50 1.04
6.0.5        6 0.50 1.44
1.0.75       1 0.75 0.78
2.0.75       2 0.75 0.71
3.0.75       3 0.75 1.16
4.0.75       4 0.75 1.02
5.0.75       5 0.75 0.81
6.0.75       6 0.75 1.03
1.1          1 1.00 0.48
2.1          2 1.00 0.70
3.1          3 1.00 0.80
4.1          4 1.00 0.89
5.1          5 1.00 0.39
6.1          6 1.00 0.84
1.1.25       1 1.25 0.37
2.1.25       2 1.25 0.64
3.1.25       3 1.25 0.80
4.1.25       4 1.25 0.59
5.1.25       5 1.25 0.30
6.1.25       6 1.25 0.64
1.2          1 2.00 0.19
2.2          2 2.00 0.36
3.2          3 2.00 0.39
4.2          4 2.00 0.40
5.2          5 2.00 0.23
6.2          6 2.00 0.42
1.3          1 3.00 0.12
2.3          2 3.00 0.32
3.3          3 3.00 0.22
4.3          4 3.00 0.16
5.3          5 3.00 0.13
6.3          6 3.00 0.24
1.4          1 4.00 0.11
2.4          2 4.00 0.20
3.4          3 4.00 0.12
4.4          4 4.00 0.11
5.4          5 4.00 0.11
6.4          6 4.00 0.17
1.5          1 5.00 0.08
2.5          2 5.00 0.25
3.5          3 5.00 0.11
4.5          4 5.00 0.10
5.5          5 5.00 0.08
6.5          6 5.00 0.13
1.6          1 6.00 0.07
2.6          2 6.00 0.12
3.6          3 6.00 0.08
4.6          4 6.00 0.07
5.6          5 6.00 0.10
6.6          6 6.00 0.10
1.8          1 8.00 0.05
2.8          2 8.00 0.08
3.8          3 8.00 0.08
4.8          4 8.00 0.07
5.8          5 8.00 0.06
6.8          6 8.00 0.09
> reshape(wide, idvar = "Subject", varying = list(2:12),
+         v.names = "conc", direction = "long")
     Subject time conc
1.1        1    1 1.50
2.1        2    1 2.03
3.1        3    1 2.72
4.1        4    1 1.85
5.1        5    1 2.05
6.1        6    1 2.31
1.2        1    2 0.94
2.2        2    2 1.63
3.2        3    2 1.49
4.2        4    2 1.39
5.2        5    2 1.04
6.2        6    2 1.44
1.3        1    3 0.78
2.3        2    3 0.71
3.3        3    3 1.16
4.3        4    3 1.02
5.3        5    3 0.81
6.3        6    3 1.03
1.4        1    4 0.48
2.4        2    4 0.70
3.4        3    4 0.80
4.4        4    4 0.89
5.4        5    4 0.39
6.4        6    4 0.84
1.5        1    5 0.37
2.5        2    5 0.64
3.5        3    5 0.80
4.5        4    5 0.59
5.5        5    5 0.30
6.5        6    5 0.64
1.6        1    6 0.19
2.6        2    6 0.36
3.6        3    6 0.39
4.6        4    6 0.40
5.6        5    6 0.23
6.6        6    6 0.42
1.7        1    7 0.12
2.7        2    7 0.32
3.7        3    7 0.22
4.7        4    7 0.16
5.7        5    7 0.13
6.7        6    7 0.24
1.8        1    8 0.11
2.8        2    8 0.20
3.8        3    8 0.12
4.8        4    8 0.11
5.8        5    8 0.11
6.8        6    8 0.17
1.9        1    9 0.08
2.9        2    9 0.25
3.9        3    9 0.11
4.9        4    9 0.10
5.9        5    9 0.08
6.9        6    9 0.13
1.10       1   10 0.07
2.10       2   10 0.12
3.10       3   10 0.08
4.10       4   10 0.07
5.10       5   10 0.10
6.10       6   10 0.10
1.11       1   11 0.05
2.11       2   11 0.08
3.11       3   11 0.08
4.11       4   11 0.07
5.11       5   11 0.06
6.11       6   11 0.09
> 
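> ## With an explicit 'varying' the times above default to 1:11; a sketch
> ## (not run) of supplying the actual measurement times via 'times':
> ## Not run: 
> ##D reshape(wide, idvar = "Subject", varying = list(2:12), v.names = "conc",
> ##D         times = c(0.25, 0.5, 0.75, 1, 1.25, 2, 3, 4, 5, 6, 8),
> ##D         direction = "long")
> ## End(Not run)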
> ## times need not be numeric
> df <- data.frame(id = rep(1:4, rep(2,4)),
+                  visit = rep(c("Before","After"), 4),
+                  x = rnorm(4), y = runif(4))
> df
  id  visit          x          y
1  1 Before -0.6264538 0.62911404
2  1  After  0.1836433 0.06178627
3  2 Before -0.8356286 0.20597457
4  2  After  1.5952808 0.17655675
5  3 Before -0.6264538 0.62911404
6  3  After  0.1836433 0.06178627
7  4 Before -0.8356286 0.20597457
8  4  After  1.5952808 0.17655675
> reshape(df, timevar = "visit", idvar = "id", direction = "wide")
  id   x.Before  y.Before   x.After    y.After
1  1 -0.6264538 0.6291140 0.1836433 0.06178627
3  2 -0.8356286 0.2059746 1.5952808 0.17655675
5  3 -0.6264538 0.6291140 0.1836433 0.06178627
7  4 -0.8356286 0.2059746 1.5952808 0.17655675
> ## warns that y is really varying
> reshape(df, timevar = "visit", idvar = "id", direction = "wide", v.names = "x")
Warning in reshapeWide(data, idvar = idvar, timevar = timevar, varying = varying,  :
  some constant variables (y) are really varying
  id         y   x.Before   x.After
1  1 0.6291140 -0.6264538 0.1836433
3  2 0.2059746 -0.8356286 1.5952808
5  3 0.6291140 -0.6264538 0.1836433
7  4 0.2059746 -0.8356286 1.5952808
> 
> 
> ##  unbalanced 'long' data leads to NA fill in 'wide' form
> df2 <- df[1:7, ]
> df2
  id  visit          x          y
1  1 Before -0.6264538 0.62911404
2  1  After  0.1836433 0.06178627
3  2 Before -0.8356286 0.20597457
4  2  After  1.5952808 0.17655675
5  3 Before -0.6264538 0.62911404
6  3  After  0.1836433 0.06178627
7  4 Before -0.8356286 0.20597457
> reshape(df2, timevar = "visit", idvar = "id", direction = "wide")
  id   x.Before  y.Before   x.After    y.After
1  1 -0.6264538 0.6291140 0.1836433 0.06178627
3  2 -0.8356286 0.2059746 1.5952808 0.17655675
5  3 -0.6264538 0.6291140 0.1836433 0.06178627
7  4 -0.8356286 0.2059746        NA         NA
> 
> ## Alternative regular expressions for guessing names
> df3 <- data.frame(id = 1:4, age = c(40,50,60,50), dose1 = c(1,2,1,2),
+                   dose2 = c(2,1,2,1), dose4 = c(3,3,3,3))
> reshape(df3, direction = "long", varying = 3:5, sep = "")
    id age time dose
1.1  1  40    1    1
2.1  2  50    1    2
3.1  3  60    1    1
4.1  4  50    1    2
1.2  1  40    2    2
2.2  2  50    2    1
3.2  3  60    2    2
4.2  4  50    2    1
1.4  1  40    4    3
2.4  2  50    4    3
3.4  3  60    4    3
4.4  4  50    4    3
> 
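> ## Roughly equivalently (a sketch, not run): the letter/digit name
> ## guessing that sep = "" performs can be spelled out via 'split':
> ## Not run: 
> ##D reshape(df3, direction = "long", varying = 3:5,
> ##D         split = list(regexp = "[A-Za-z][0-9]", include = TRUE))
> ## End(Not run)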
> 
> ## an example that isn't longitudinal data
> state.x77 <- as.data.frame(state.x77)
> long <- reshape(state.x77, idvar = "state", ids = row.names(state.x77),
+                 times = names(state.x77), timevar = "Characteristic",
+                 varying = list(names(state.x77)), direction = "long")
> 
> reshape(long, direction = "wide")
                                   state Population Income Illiteracy Life Exp
Alabama.Population               Alabama       3615   3624        2.1    69.05
Alaska.Population                 Alaska        365   6315        1.5    69.31
Arizona.Population               Arizona       2212   4530        1.8    70.55
Arkansas.Population             Arkansas       2110   3378        1.9    70.66
California.Population         California      21198   5114        1.1    71.71
Colorado.Population             Colorado       2541   4884        0.7    72.06
Connecticut.Population       Connecticut       3100   5348        1.1    72.48
Delaware.Population             Delaware        579   4809        0.9    70.06
Florida.Population               Florida       8277   4815        1.3    70.66
Georgia.Population               Georgia       4931   4091        2.0    68.54
Hawaii.Population                 Hawaii        868   4963        1.9    73.60
Idaho.Population                   Idaho        813   4119        0.6    71.87
Illinois.Population             Illinois      11197   5107        0.9    70.14
Indiana.Population               Indiana       5313   4458        0.7    70.88
Iowa.Population                     Iowa       2861   4628        0.5    72.56
Kansas.Population                 Kansas       2280   4669        0.6    72.58
Kentucky.Population             Kentucky       3387   3712        1.6    70.10
Louisiana.Population           Louisiana       3806   3545        2.8    68.76
Maine.Population                   Maine       1058   3694        0.7    70.39
Maryland.Population             Maryland       4122   5299        0.9    70.22
Massachusetts.Population   Massachusetts       5814   4755        1.1    71.83
Michigan.Population             Michigan       9111   4751        0.9    70.63
Minnesota.Population           Minnesota       3921   4675        0.6    72.96
Mississippi.Population       Mississippi       2341   3098        2.4    68.09
Missouri.Population             Missouri       4767   4254        0.8    70.69
Montana.Population               Montana        746   4347        0.6    70.56
Nebraska.Population             Nebraska       1544   4508        0.6    72.60
Nevada.Population                 Nevada        590   5149        0.5    69.03
New Hampshire.Population   New Hampshire        812   4281        0.7    71.23
New Jersey.Population         New Jersey       7333   5237        1.1    70.93
New Mexico.Population         New Mexico       1144   3601        2.2    70.32
New York.Population             New York      18076   4903        1.4    70.55
North Carolina.Population North Carolina       5441   3875        1.8    69.21
North Dakota.Population     North Dakota        637   5087        0.8    72.78
Ohio.Population                     Ohio      10735   4561        0.8    70.82
Oklahoma.Population             Oklahoma       2715   3983        1.1    71.42
Oregon.Population                 Oregon       2284   4660        0.6    72.13
Pennsylvania.Population     Pennsylvania      11860   4449        1.0    70.43
Rhode Island.Population     Rhode Island        931   4558        1.3    71.90
South Carolina.Population South Carolina       2816   3635        2.3    67.96
South Dakota.Population     South Dakota        681   4167        0.5    72.08
Tennessee.Population           Tennessee       4173   3821        1.7    70.11
Texas.Population                   Texas      12237   4188        2.2    70.90
Utah.Population                     Utah       1203   4022        0.6    72.90
Vermont.Population               Vermont        472   3907        0.6    71.64
Virginia.Population             Virginia       4981   4701        1.4    70.08
Washington.Population         Washington       3559   4864        0.6    71.72
West Virginia.Population   West Virginia       1799   3617        1.4    69.48
Wisconsin.Population           Wisconsin       4589   4468        0.7    72.48
Wyoming.Population               Wyoming        376   4566        0.6    70.29
                          Murder HS Grad Frost   Area
Alabama.Population          15.1    41.3    20  50708
Alaska.Population           11.3    66.7   152 566432
Arizona.Population           7.8    58.1    15 113417
Arkansas.Population         10.1    39.9    65  51945
California.Population       10.3    62.6    20 156361
Colorado.Population          6.8    63.9   166 103766
Connecticut.Population       3.1    56.0   139   4862
Delaware.Population          6.2    54.6   103   1982
Florida.Population          10.7    52.6    11  54090
Georgia.Population          13.9    40.6    60  58073
Hawaii.Population            6.2    61.9     0   6425
Idaho.Population             5.3    59.5   126  82677
Illinois.Population         10.3    52.6   127  55748
Indiana.Population           7.1    52.9   122  36097
Iowa.Population              2.3    59.0   140  55941
Kansas.Population            4.5    59.9   114  81787
Kentucky.Population         10.6    38.5    95  39650
Louisiana.Population        13.2    42.2    12  44930
Maine.Population             2.7    54.7   161  30920
Maryland.Population          8.5    52.3   101   9891
Massachusetts.Population     3.3    58.5   103   7826
Michigan.Population         11.1    52.8   125  56817
Minnesota.Population         2.3    57.6   160  79289
Mississippi.Population      12.5    41.0    50  47296
Missouri.Population          9.3    48.8   108  68995
Montana.Population           5.0    59.2   155 145587
Nebraska.Population          2.9    59.3   139  76483
Nevada.Population           11.5    65.2   188 109889
New Hampshire.Population     3.3    57.6   174   9027
New Jersey.Population        5.2    52.5   115   7521
New Mexico.Population        9.7    55.2   120 121412
New York.Population         10.9    52.7    82  47831
North Carolina.Population   11.1    38.5    80  48798
North Dakota.Population      1.4    50.3   186  69273
Ohio.Population              7.4    53.2   124  40975
Oklahoma.Population          6.4    51.6    82  68782
Oregon.Population            4.2    60.0    44  96184
Pennsylvania.Population      6.1    50.2   126  44966
Rhode Island.Population      2.4    46.4   127   1049
South Carolina.Population   11.6    37.8    65  30225
South Dakota.Population      1.7    53.3   172  75955
Tennessee.Population        11.0    41.8    70  41328
Texas.Population            12.2    47.4    35 262134
Utah.Population              4.5    67.3   137  82096
Vermont.Population           5.5    57.1   168   9267
Virginia.Population          9.5    47.8    85  39780
Washington.Population        4.3    63.5    32  66570
West Virginia.Population     6.7    41.6   100  24070
Wisconsin.Population         3.0    54.5   149  54464
Wyoming.Population           6.9    62.9   173  97203
> 
> reshape(long, direction = "wide", new.row.names = unique(long$state))
                        state Population Income Illiteracy Life Exp Murder
Alabama               Alabama       3615   3624        2.1    69.05   15.1
Alaska                 Alaska        365   6315        1.5    69.31   11.3
Arizona               Arizona       2212   4530        1.8    70.55    7.8
Arkansas             Arkansas       2110   3378        1.9    70.66   10.1
California         California      21198   5114        1.1    71.71   10.3
Colorado             Colorado       2541   4884        0.7    72.06    6.8
Connecticut       Connecticut       3100   5348        1.1    72.48    3.1
Delaware             Delaware        579   4809        0.9    70.06    6.2
Florida               Florida       8277   4815        1.3    70.66   10.7
Georgia               Georgia       4931   4091        2.0    68.54   13.9
Hawaii                 Hawaii        868   4963        1.9    73.60    6.2
Idaho                   Idaho        813   4119        0.6    71.87    5.3
Illinois             Illinois      11197   5107        0.9    70.14   10.3
Indiana               Indiana       5313   4458        0.7    70.88    7.1
Iowa                     Iowa       2861   4628        0.5    72.56    2.3
Kansas                 Kansas       2280   4669        0.6    72.58    4.5
Kentucky             Kentucky       3387   3712        1.6    70.10   10.6
Louisiana           Louisiana       3806   3545        2.8    68.76   13.2
Maine                   Maine       1058   3694        0.7    70.39    2.7
Maryland             Maryland       4122   5299        0.9    70.22    8.5
Massachusetts   Massachusetts       5814   4755        1.1    71.83    3.3
Michigan             Michigan       9111   4751        0.9    70.63   11.1
Minnesota           Minnesota       3921   4675        0.6    72.96    2.3
Mississippi       Mississippi       2341   3098        2.4    68.09   12.5
Missouri             Missouri       4767   4254        0.8    70.69    9.3
Montana               Montana        746   4347        0.6    70.56    5.0
Nebraska             Nebraska       1544   4508        0.6    72.60    2.9
Nevada                 Nevada        590   5149        0.5    69.03   11.5
New Hampshire   New Hampshire        812   4281        0.7    71.23    3.3
New Jersey         New Jersey       7333   5237        1.1    70.93    5.2
New Mexico         New Mexico       1144   3601        2.2    70.32    9.7
New York             New York      18076   4903        1.4    70.55   10.9
North Carolina North Carolina       5441   3875        1.8    69.21   11.1
North Dakota     North Dakota        637   5087        0.8    72.78    1.4
Ohio                     Ohio      10735   4561        0.8    70.82    7.4
Oklahoma             Oklahoma       2715   3983        1.1    71.42    6.4
Oregon                 Oregon       2284   4660        0.6    72.13    4.2
Pennsylvania     Pennsylvania      11860   4449        1.0    70.43    6.1
Rhode Island     Rhode Island        931   4558        1.3    71.90    2.4
South Carolina South Carolina       2816   3635        2.3    67.96   11.6
South Dakota     South Dakota        681   4167        0.5    72.08    1.7
Tennessee           Tennessee       4173   3821        1.7    70.11   11.0
Texas                   Texas      12237   4188        2.2    70.90   12.2
Utah                     Utah       1203   4022        0.6    72.90    4.5
Vermont               Vermont        472   3907        0.6    71.64    5.5
Virginia             Virginia       4981   4701        1.4    70.08    9.5
Washington         Washington       3559   4864        0.6    71.72    4.3
West Virginia   West Virginia       1799   3617        1.4    69.48    6.7
Wisconsin           Wisconsin       4589   4468        0.7    72.48    3.0
Wyoming               Wyoming        376   4566        0.6    70.29    6.9
               HS Grad Frost   Area
Alabama           41.3    20  50708
Alaska            66.7   152 566432
Arizona           58.1    15 113417
Arkansas          39.9    65  51945
California        62.6    20 156361
Colorado          63.9   166 103766
Connecticut       56.0   139   4862
Delaware          54.6   103   1982
Florida           52.6    11  54090
Georgia           40.6    60  58073
Hawaii            61.9     0   6425
Idaho             59.5   126  82677
Illinois          52.6   127  55748
Indiana           52.9   122  36097
Iowa              59.0   140  55941
Kansas            59.9   114  81787
Kentucky          38.5    95  39650
Louisiana         42.2    12  44930
Maine             54.7   161  30920
Maryland          52.3   101   9891
Massachusetts     58.5   103   7826
Michigan          52.8   125  56817
Minnesota         57.6   160  79289
Mississippi       41.0    50  47296
Missouri          48.8   108  68995
Montana           59.2   155 145587
Nebraska          59.3   139  76483
Nevada            65.2   188 109889
New Hampshire     57.6   174   9027
New Jersey        52.5   115   7521
New Mexico        55.2   120 121412
New York          52.7    82  47831
North Carolina    38.5    80  48798
North Dakota      50.3   186  69273
Ohio              53.2   124  40975
Oklahoma          51.6    82  68782
Oregon            60.0    44  96184
Pennsylvania      50.2   126  44966
Rhode Island      46.4   127   1049
South Carolina    37.8    65  30225
South Dakota      53.3   172  75955
Tennessee         41.8    70  41328
Texas             47.4    35 262134
Utah              67.3   137  82096
Vermont           57.1   168   9267
Virginia          47.8    85  39780
Washington        63.5    32  66570
West Virginia     41.6   100  24070
Wisconsin         54.5   149  54464
Wyoming           62.9   173  97203
> 
> ## multiple id variables
> df3 <- data.frame(school = rep(1:3, each = 4), class = rep(9:10, 6),
+                   time = rep(c(1,1,2,2), 3), score = rnorm(12))
> wide <- reshape(df3, idvar = c("school", "class"), direction = "wide")
> wide
   school class     score.1     score.2
1       1     9  0.48742905  0.57578135
2       1    10  0.73832471 -0.30538839
5       2     9  1.51178117 -0.62124058
6       2    10  0.38984324 -2.21469989
9       3     9  1.12493092 -0.01619026
10      3    10 -0.04493361  0.94383621
> ## transform back
> reshape(wide)
       school class time     score.1
1.9.1       1     9    1  0.48742905
1.10.1      1    10    1  0.73832471
2.9.1       2     9    1  1.51178117
2.10.1      2    10    1  0.38984324
3.9.1       3     9    1  1.12493092
3.10.1      3    10    1 -0.04493361
1.9.2       1     9    2  0.57578135
1.10.2      1    10    2 -0.30538839
2.9.2       2     9    2 -0.62124058
2.10.2      2    10    2 -2.21469989
3.9.2       3     9    2 -0.01619026
3.10.2      3    10    2  0.94383621
> 
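> ## The bare reshape(wide) reversal works because reshape() records its
> ## bookkeeping on the result; a sketch (not run) of inspecting it:
> ## Not run: 
> ##D str(attr(wide, "reshapeWide"))
> ## End(Not run)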
> 
> 
> 
> cleanEx()
> nameEx("runmed")
> ### * runmed
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: runmed
> ### Title: Running Medians - Robust Scatter Plot Smoothing
> ### Aliases: runmed
> ### Keywords: smooth robust
> 
> ### ** Examples
> 
> require(graphics)
> 
> utils::example(nhtemp)

nhtemp> require(stats); require(graphics)

nhtemp> plot(nhtemp, main = "nhtemp data",
nhtemp+   ylab = "Mean annual temperature in New Haven, CT (deg. F)")
> myNHT <- as.vector(nhtemp)
> myNHT[20] <- 2 * nhtemp[20]
> plot(myNHT, type = "b", ylim = c(48, 60), main = "Running Medians Example")
> lines(runmed(myNHT, 7), col = "red")
> 
> ## special: multiple y values for one x
> plot(cars, main = "'cars' data and runmed(dist, 3)")
> lines(cars, col = "light gray", type = "c")
> with(cars, lines(speed, runmed(dist, k = 3), col = 2))
> 
> ## nice quadratic with a few outliers
> y <- ys <- (-20:20)^2
> y [c(1,10,21,41)] <- c(150, 30, 400, 450)
> all(y == runmed(y, 1)) # a 1-neighbourhood median leaves the data unchanged
[1] TRUE
> plot(y) ## lines(y, lwd = .1, col = "light gray")
> lines(lowess(seq(y), y, f = 0.3), col = "brown")
> lines(runmed(y, 7), lwd = 2, col = "blue")
> lines(runmed(y, 11), lwd = 2, col = "red")
> 
> ## Lowess is not robust
> y <- ys ; y[21] <- 6666 ; x <- seq(y)
> col <- c("black", "brown","blue")
> plot(y, col = col[1])
> lines(lowess(x, y, f = 0.3), col = col[2])
> lines(runmed(y, 7),      lwd = 2, col = col[3])
> legend(length(y),max(y), c("data", "lowess(y, f = 0.3)", "runmed(y, 7)"),
+        xjust = 1, col = col, lty = c(0, 1, 1), pch = c(1,NA,NA))
> 
> ## An example with initial NA's - used to fail badly (notably for "Turlach"):
> x15 <- c(rep(NA, 4), c(9, 9, 4, 22, 6, 1, 7, 5, 2, 8, 3))
> rS15 <- cbind(Sk.3 = runmed(x15, k = 3, algorithm="S"),
+               Sk.7 = runmed(x15, k = 7, algorithm="S"),
+               Sk.11= runmed(x15, k =11, algorithm="S"))
> rT15 <- cbind(Tk.3 = runmed(x15, k = 3, algorithm="T", print.level=1),
+               Tk.7 = runmed(x15, k = 7, algorithm="T", print.level=1),
+               Tk.9 = runmed(x15, k = 9, algorithm="T", print.level=1),
+               Tk.11= runmed(x15, k =11, algorithm="T", print.level=1))
runmed(x, k=3, endrule='median' ( => iend=0), algorithm='Turlach',
       na.*='+Big_alternate' ( => iNAct=1))
firstNA = 1.
R_heapsort(1, 3,..): l=2:
         j:      0     1     2     3     4     5     6
 window []:    -2B   -2B  -BIG  +BIG  +BIG   +2B   +2B
  nrlist[]: -12345     2     1     2     0-12345-12345
 outlist[]:      4     2     3     2
runmed(x, k=7, endrule='median' ( => iend=0), algorithm='Turlach',
       na.*='+Big_alternate' ( => iNAct=1))
firstNA = 1.
R_heapsort(1, 7,..): l=4:
         j:      0     1     2     3     4     5     6     7     8     9    10    11    12    13    14
 window []:    -2B   -2B   -2B   -2B  -BIG  -BIG     4     9     9  +BIG  +BIG   +2B   +2B   +2B   +2B
  nrlist[]: -12345     4     2     7     3     1     6     5     4     2     0-12345-12345-12345-12345
 outlist[]:     10     5     9     4     8     7     6     3
runmed(x, k=9, endrule='median' ( => iend=0), algorithm='Turlach',
       na.*='+Big_alternate' ( => iNAct=1))
firstNA = 1.
R_heapsort(1, 9,..): l=5:
         j:      0     1     2     3     4     5     6     7     8     9    10    11    12    13    14    15    16    17    18
 window []:    -2B   -2B   -2B   -2B   -2B  -BIG  -BIG     4     6     9     9    22  +BIG  +BIG   +2B   +2B   +2B   +2B   +2B
  nrlist[]: -12345     4     2     7     9     3     1     6     8     5     4     7     2     0-12345-12345-12345-12345-12345
 outlist[]:     13     6    12     5    10     9     7    11     8     4
runmed(x, k=11, endrule='median' ( => iend=0), algorithm='Turlach',
       na.*='+Big_alternate' ( => iNAct=1))
firstNA = 1.
R_heapsort(1, 11,..): l=6:
         j:      0     1     2     3     4     5     6     7     8     9    10    11    12    13    14    15    16    17    18    19    20    21    22
 window []:    -2B   -2B   -2B   -2B   -2B   -2B  -BIG  -BIG     1     4     6     7     9     9    22  +BIG  +BIG   +2B   +2B   +2B   +2B   +2B   +2B
  nrlist[]: -12345     2     4    10     7     9     1     3     9     6     8    10     5     4     7     2     0-12345-12345-12345-12345-12345-12345
 outlist[]:     16     6    15     7    13    12     9    14    10     8    11     6
> cbind(x15, rS15, rT15) # result for k=11 may be a bit surprising ..
      x15 Sk.3 Sk.7 Sk.11 Tk.3 Tk.7 Tk.9 Tk.11
 [1,]  NA  NaN  NaN   NaN  NaN  NaN  NaN   NaN
 [2,]  NA   NA  NaN   NaN   NA  NaN  NaN   NaN
 [3,]  NA   NA    9     9   NA    9    9     9
 [4,]  NA    9    9     7    9    9    7     7
 [5,]   9    9    9     6    9    9    9     6
 [6,]   9    9    9     7    9    9    6     7
 [7,]   4    9    6     6    9    6    7     6
 [8,]  22    6    7     6    6    7    6     6
 [9,]   6    6    6     6    6    6    6     6
[10,]   1    6    5     6    6    5    6     6
[11,]   7    5    6     6    5    6    5     6
[12,]   5    5    5     6    5    5    5     6
[13,]   2    5    5     5    5    5    5     5
[14,]   8    3    3     3    3    3    3     3
[15,]   3    3    3     3    3    3    3     3
> Tv <- rT15[-(1:3),]
> stopifnot(3 <= Tv, Tv <= 9, 5 <= Tv[1:10,])
> matplot(y = cbind(x15, rT15), type = "b", ylim = c(1,9), pch=1:5, xlab = NA,
+         main = "runmed(x15, k, algo = \"Turlach\")")
> mtext(paste("x15 <-", deparse(x15)))
> points(x15, cex=2)
> legend("bottomleft", legend=c("data", paste("k = ", c(3,7,9,11))),
+        bty="n", col=1:5, lty=1:5, pch=1:5)
> 
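> ## A sketch (not run): besides 'algorithm', runmed() also takes an
> ## 'endrule' argument ("median" by default); "keep" leaves the first
> ## and last few values of the input untouched:
> ## Not run: 
> ##D cbind(med  = runmed(myNHT, 7),
> ##D       keep = runmed(myNHT, 7, endrule = "keep"))
> ## End(Not run)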
> 
> 
> cleanEx()
> nameEx("scatter.smooth")
> ### * scatter.smooth
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: scatter.smooth
> ### Title: Scatter Plot with Smooth Curve Fitted by 'loess'
> ### Aliases: scatter.smooth loess.smooth
> ### Keywords: smooth
> 
> ### ** Examples
> 
> require(graphics)
> 
> with(cars, scatter.smooth(speed, dist))
> ## or, with a thick dotted line for the smooth:
> with(cars, scatter.smooth(speed, dist, lpars =
+                     list(col = "red", lwd = 3, lty = 3)))
> 
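> ## loess.smooth() (also documented here) computes the smooth without
> ## plotting; a sketch (not run) of adding the curve manually:
> ## Not run: 
> ##D plot(cars$speed, cars$dist)
> ##D lines(with(cars, loess.smooth(speed, dist)), col = "red")
> ## End(Not run)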
> 
> 
> cleanEx()
> nameEx("screeplot")
> ### * screeplot
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: screeplot
> ### Title: Scree Plots
> ### Aliases: screeplot screeplot.default
> ### Keywords: multivariate
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## The variances of the variables in the
> ## USArrests data vary by orders of magnitude, so scaling is appropriate
> (pc.cr <- princomp(USArrests, cor = TRUE))  # scaled, as recommended above
Call:
princomp(x = USArrests, cor = TRUE)

Standard deviations:
   Comp.1    Comp.2    Comp.3    Comp.4 
1.5748783 0.9948694 0.5971291 0.4164494 

 4  variables and  50 observations.
> screeplot(pc.cr)
> 
> fit <- princomp(covmat = Harman74.cor)
> screeplot(fit)
> screeplot(fit, npcs = 24, type = "lines")
> 
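> ## screeplot() also accepts "prcomp" fits; a sketch (not run), matching
> ## the scaled analysis above:
> ## Not run: 
> ##D screeplot(prcomp(USArrests, scale. = TRUE), type = "lines")
> ## End(Not run)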
> 
> 
> cleanEx()
> nameEx("sd")
> ### * sd
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: sd
> ### Title: Standard Deviation
> ### Aliases: sd
> ### Keywords: univar
> 
> ### ** Examples
> 
> sd(1:2) ^ 2
[1] 0.5
> 
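> ## sd() uses the denominator n - 1, hence sd(1:2)^2 == var(1:2) == 0.5;
> ## a sketch (not run) of checking the sd/var relation directly:
> ## Not run: 
> ##D x <- rnorm(20)
> ##D stopifnot(all.equal(sd(x), sqrt(var(x))))
> ## End(Not run)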
> 
> 
> cleanEx()
> nameEx("se.contrast")
> ### * se.contrast
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: se.contrast
> ### Title: Standard Errors for Contrasts in Model Terms
> ### Aliases: se.contrast se.contrast.aov se.contrast.aovlist
> ### Keywords: models
> 
> ### ** Examples
> 
> ## From Venables and Ripley (2002) p.165.
> N <- c(0,1,0,1,1,1,0,0,0,1,1,0,1,1,0,0,1,0,1,0,1,1,0,0)
> P <- c(1,1,0,0,0,1,0,1,1,1,0,0,0,1,0,1,1,0,0,1,0,1,1,0)
> K <- c(1,0,0,1,0,1,1,0,0,1,0,1,0,1,1,0,0,0,1,1,1,0,1,0)
> yield <- c(49.5,62.8,46.8,57.0,59.8,58.5,55.5,56.0,62.8,55.8,69.5,
+ 55.0, 62.0,48.8,45.5,44.2,52.0,51.5,49.8,48.8,57.2,59.0,53.2,56.0)
> 
> npk <- data.frame(block = gl(6,4), N = factor(N), P = factor(P),
+                   K = factor(K), yield = yield)
> ## Set suitable contrasts.
> options(contrasts = c("contr.helmert", "contr.poly"))
> npk.aov1 <- aov(yield ~ block + N + K, data = npk)
> se.contrast(npk.aov1, list(N == "0", N == "1"), data = npk)
[1] 1.609175
> # or via a matrix
> cont <- matrix(c(-1,1), 2, 1, dimnames = list(NULL, "N"))
> se.contrast(npk.aov1, cont[N, , drop = FALSE]/12, data = npk)
       N 
1.609175 
> 
> ## test a multi-stratum model
> npk.aov2 <- aov(yield ~ N + K + Error(block/(N + K)), data = npk)
> se.contrast(npk.aov2, list(N == "0", N == "1"))
[1] 1.812166
> 
> 
> ## an example looking at an interaction contrast
> ## Dataset from R.E. Kirk (1995)
> ## 'Experimental Design: procedures for the behavioral sciences'
> score <- c(12, 8,10, 6, 8, 4,10,12, 8, 6,10,14, 9, 7, 9, 5,11,12,
+             7,13, 9, 9, 5,11, 8, 7, 3, 8,12,10,13,14,19, 9,16,14)
> A <- gl(2, 18, labels = c("a1", "a2"))
> B <- rep(gl(3, 6, labels = c("b1", "b2", "b3")), 2)
> fit <- aov(score ~ A*B)
> cont <- c(1, -1)[A] * c(1, -1, 0)[B]
> sum(cont)       # 0
[1] 0
> sum(cont*score) # value of the contrast
[1] -18
> se.contrast(fit, as.matrix(cont))
Contrast 1 
  14.24547 
> (t.stat <- sum(cont*score)/se.contrast(fit, as.matrix(cont)))
Contrast 1 
  -1.26356 
> summary(fit, split = list(B = 1:2), expand.split = TRUE)
            Df Sum Sq Mean Sq F value  Pr(>F)   
A            1  18.78   18.78   2.221 0.14661   
B            2  62.00   31.00   3.666 0.03763 * 
  B: C1      1   1.50    1.50   0.177 0.67662   
  B: C2      1  60.50   60.50   7.155 0.01199 * 
A:B          2  81.56   40.78   4.823 0.01527 * 
  A:B: C1    1  13.50   13.50   1.597 0.21612   
  A:B: C2    1  68.06   68.06   8.049 0.00809 **
Residuals   30 253.67    8.46                   
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> ## t.stat^2 is the F value on the A:B: C1 line (with Helmert contrasts)
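> ## A sketch (not run) of checking that against the table numerically:
> ## Not run: 
> ##D stopifnot(all.equal(unname(t.stat)^2, 1.597, tolerance = 1e-3))
> ## End(Not run)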
> ## Now look at all three interaction contrasts
> cont <- c(1, -1)[A] * cbind(c(1, -1, 0), c(1, 0, -1), c(0, 1, -1))[B,]
> se.contrast(fit, cont)  # same, due to balance.
Contrast 1 Contrast 2 Contrast 3 
  14.24547   14.24547   14.24547 
> rm(A, B, score)
> 
> 
> ## multi-stratum example where efficiencies play a role
> ## An example from Yates (1932),
> ## a 2^3 design in 2 blocks replicated 4 times
> 
> Block <- gl(8, 4)
> A <- factor(c(0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,
+               0,1,0,1,0,1,0,1,0,1,0,1))
> B <- factor(c(0,0,1,1,0,0,1,1,0,1,0,1,1,0,1,0,0,0,1,1,
+               0,0,1,1,0,0,1,1,0,0,1,1))
> C <- factor(c(0,1,1,0,1,0,0,1,0,0,1,1,0,0,1,1,0,1,0,1,
+               1,0,1,0,0,0,1,1,1,1,0,0))
> Yield <- c(101, 373, 398, 291, 312, 106, 265, 450, 106, 306, 324, 449,
+            272, 89, 407, 338, 87, 324, 279, 471, 323, 128, 423, 334,
+            131, 103, 445, 437, 324, 361, 302, 272)
> aovdat <- data.frame(Block, A, B, C, Yield)
> fit <- aov(Yield ~ A + B * C + Error(Block), data = aovdat)
> cont1 <- c(-1, 1)[A]/32  # Helmert contrasts
> cont2 <- c(-1, 1)[B] * c(-1, 1)[C]/32
> cont <- cbind(A = cont1, BC = cont2)
> colSums(cont*Yield) # values of the contrasts
        A        BC 
 10.40625 -20.90625 
> se.contrast(fit, as.matrix(cont))
       A       BC 
3.377196 3.899650 
> 
> 
> base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly"))
> cleanEx()
> nameEx("selfStart")
> ### * selfStart
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: selfStart
> ### Title: Construct Self-starting Nonlinear Models
> ### Aliases: selfStart selfStart.default selfStart.formula
> ### Keywords: models
> 
> ### ** Examples
> 
> ## self-starting logistic model
> 
> ## The "initializer" (finds initial values for parameters from data):
> initLogis <- function(mCall, data, LHS, ...) {
+     xy <- sortedXyData(mCall[["x"]], LHS, data)
+     if(nrow(xy) < 4)
+         stop("too few distinct input values to fit a logistic model")
+     z <- xy[["y"]]
+     ## transform to proportion, i.e. in (0,1) :
+     rng <- range(z); dz <- diff(rng)
+     z <- (z - rng[1L] + 0.05 * dz)/(1.1 * dz)
+     xy[["z"]] <- log(z/(1 - z))		# logit transformation
+     aux <- coef(lm(x ~ z, xy))
+     pars <- coef(nls(y ~ 1/(1 + exp((xmid - x)/scal)),
+                      data = xy,
+                      start = list(xmid = aux[[1L]], scal = aux[[2L]]),
+                      algorithm = "plinear", ...))
+     setNames(pars [c(".lin", "xmid", "scal")],
+              mCall[c("Asym", "xmid", "scal")])
+ }
> 
> mySSlogis <- selfStart(~ Asym/(1 + exp((xmid - x)/scal)),
+                        initial = initLogis,
+                        parameters = c("Asym", "xmid", "scal"))
> ## IGNORE_RDIFF_BEGIN
> getInitial(weight ~ mySSlogis(Time, Asym, xmid, scal),
+            data = subset(ChickWeight, Chick == 1))
     Asym      xmid      scal 
937.03000  35.22296  11.40521 
> ## IGNORE_RDIFF_END
> 
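> ## A sketch (not run): the self-start model can be used directly in
> ## nls(), which then needs no explicit 'start' values:
> ## Not run: 
> ##D nls(weight ~ mySSlogis(Time, Asym, xmid, scal),
> ##D     data = subset(ChickWeight, Chick == 1))
> ## End(Not run)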
> # 'first.order.log.model' is a function object defining a first order
> # compartment model
> # 'first.order.log.initial' is a function object which calculates initial
> # values for the parameters in 'first.order.log.model'
> #
> # self-starting first order compartment model
> ## Not run: 
> ##D SSfol <- selfStart(first.order.log.model, first.order.log.initial)
> ## End(Not run)
> 
> ## Explore the self-starting models already available in R's "stats":
> pos.st <- which("package:stats" == search())
> mSS <- apropos("^SS..", where = TRUE, ignore.case = FALSE)
> (mSS <- unname(mSS[names(mSS) == pos.st]))
 [1] "SSasymp"     "SSasympOff"  "SSasympOrig" "SSbiexp"     "SSfol"      
 [6] "SSfpl"       "SSgompertz"  "SSlogis"     "SSmicmen"    "SSweibull"  
> fSS <- sapply(mSS, get, pos = pos.st, mode = "function")
> all(sapply(fSS, inherits, "selfStart"))  # -> TRUE
[1] TRUE
> 
> ## Show the argument list of each self-starting function:
> str(fSS, give.attr = FALSE)
List of 10
 $ SSasymp    :function (input, Asym, R0, lrc)  
 $ SSasympOff :function (input, Asym, lrc, c0)  
 $ SSasympOrig:function (input, Asym, lrc)  
 $ SSbiexp    :function (input, A1, lrc1, A2, lrc2)  
 $ SSfol      :function (Dose, input, lKe, lKa, lCl)  
 $ SSfpl      :function (input, A, B, xmid, scal)  
 $ SSgompertz :function (x, Asym, b2, b3)  
 $ SSlogis    :function (input, Asym, xmid, scal)  
 $ SSmicmen   :function (input, Vm, K)  
 $ SSweibull  :function (x, Asym, Drop, lrc, pwr)  
> 
> 
> 
> cleanEx()
> nameEx("setNames")
> ### * setNames
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: setNames
> ### Title: Set the Names in an Object
> ### Aliases: setNames
> ### Keywords: list
> 
> ### ** Examples
> 
> setNames( 1:3, c("foo", "bar", "baz") )
foo bar baz 
  1   2   3 
> # this is just a short form of
> tmp <- 1:3
> names(tmp) <-  c("foo", "bar", "baz")
> tmp
foo bar baz 
  1   2   3 
> 
> ## special case of character vector, using default
> setNames(nm = c("First", "2nd"))
  First     2nd 
"First"   "2nd" 
> 
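> ## The self-naming form is handy for keeping labels through *apply();
> ## a sketch (not run):
> ## Not run: 
> ##D sapply(setNames(nm = c("foo", "bar", "baz")), nchar)
> ## End(Not run)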
> 
> 
> cleanEx()
> nameEx("shapiro.test")
> ### * shapiro.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: shapiro.test
> ### Title: Shapiro-Wilk Normality Test
> ### Aliases: shapiro.test
> ### Keywords: htest
> 
> ### ** Examples
> 
> shapiro.test(rnorm(100, mean = 5, sd = 3))

	Shapiro-Wilk normality test

data:  rnorm(100, mean = 5, sd = 3)
W = 0.9956, p-value = 0.9876

> shapiro.test(runif(100, min = 2, max = 4))

	Shapiro-Wilk normality test

data:  runif(100, min = 2, max = 4)
W = 0.93092, p-value = 5.616e-05

> 
> 
> 
> cleanEx()
> nameEx("sigma")
> ### * sigma
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: sigma
> ### Title: Extract Residual Standard Deviation 'Sigma'
> ### Aliases: sigma sigma.default sigma.mlm sigma.glm
> ### Keywords: models
> 
> ### ** Examples
> 
> ## -- lm() ------------------------------
> lm1 <- lm(Fertility ~ . , data = swiss)
> sigma(lm1) # ~= 7.165  = "Residual standard error"  printed from summary(lm1)
[1] 7.165369
> stopifnot(all.equal(sigma(lm1), summary(lm1)$sigma, tolerance=1e-15))
> 
> ## -- nls() -----------------------------
> DNase1 <- subset(DNase, Run == 1)
> fm.DN1 <- nls(density ~ SSlogis(log(conc), Asym, xmid, scal), DNase1)
> sigma(fm.DN1) # ~= 0.01919  as from summary(..)
[1] 0.01919449
> stopifnot(all.equal(sigma(fm.DN1), summary(fm.DN1)$sigma, tolerance=1e-15))
> 
> ## -- glm() -----------------------------
> ## -- a) Binomial -- Example from MASS
> ldose <- rep(0:5, 2)
> numdead <- c(1, 4, 9, 13, 18, 20, 0, 2, 6, 10, 12, 16)
> sex <- factor(rep(c("M", "F"), c(6, 6)))
> SF <- cbind(numdead, numalive = 20-numdead)
> sigma(budworm.lg <- glm(SF ~ sex*ldose, family = binomial))
[1] 1
> 
> ## -- b) Poisson -- from ?glm :
> ## Dobson (1990) Page 93: Randomized Controlled Trial :
> counts <- c(18,17,15,20,10,20,25,13,12)
> outcome <- gl(3,1,9)
> treatment <- gl(3,3)
> sigma(glm.D93 <- glm(counts ~ outcome + treatment, family = poisson()))
[1] 1
> ## equal to
> sqrt(summary(glm.D93)$dispersion) # == 1
[1] 1
> ## and the *Quasi*poisson's dispersion
> sigma(glm.qD93 <- update(glm.D93, family = quasipoisson()))
[1] 1.137234
> sigma (glm.qD93)^2 # 1.2933 equal to
[1] 1.2933
> summary(glm.qD93)$dispersion # == 1.2933
[1] 1.2933
> 
> ## -- Multivariate lm() "mlm" -----------
> utils::example("SSD", echo=FALSE)
> sigma(mlmfit) # is the same as {but more efficient than}
   deg0NA    deg4NA    deg8NA    deg0NP    deg4NP    deg8NP 
 56.92100  86.02325  78.99367  88.54377 109.54451 116.79041 
> sqrt(diag(estVar(mlmfit)))
   deg0NA    deg4NA    deg8NA    deg0NP    deg4NP    deg8NP 
 56.92100  86.02325  78.99367  88.54377 109.54451 116.79041 
> ## Don't show: 
> stopifnot(all.equal(sigma(mlmfit), sqrt(diag(estVar(mlmfit)))))
> ## End(Don't show)
> 
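> ## For these glm fits, sigma() is the square root of the estimated
> ## dispersion; a sketch (not run) for the quasipoisson case above:
> ## Not run: 
> ##D stopifnot(all.equal(sigma(glm.qD93)^2, summary(glm.qD93)$dispersion))
> ## End(Not run)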
> 
> 
> cleanEx()
> nameEx("simulate")
> ### * simulate
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: simulate
> ### Title: Simulate Responses
> ### Aliases: simulate
> ### Keywords: models datagen
> 
> ### ** Examples
> 
> x <- 1:5
> mod1 <- lm(c(1:3, 7, 6) ~ x)
> S1 <- simulate(mod1, nsim = 4)
> ## repeat the simulation:
> .Random.seed <- attr(S1, "seed")
> identical(S1, simulate(mod1, nsim = 4))
[1] TRUE
> 
> S2 <- simulate(mod1, nsim = 200, seed = 101)
> rowMeans(S2) # should be about the same as
        1         2         3         4         5 
0.6885691 2.2329771 3.7790352 5.2775859 6.8131451 
> fitted(mod1)
  1   2   3   4   5 
0.8 2.3 3.8 5.3 6.8 
> 
> ## repeat identically:
> (sseed <- attr(S2, "seed")) # seed; RNGkind as attribute
[1] 101
attr(,"kind")
attr(,"kind")[[1]]
[1] "Mersenne-Twister"

attr(,"kind")[[2]]
[1] "Inversion"

attr(,"kind")[[3]]
[1] "Rejection"

> stopifnot(identical(S2, simulate(mod1, nsim = 200, seed = sseed)))
> 
> ## To be sure about the proper RNGkind, e.g., after
> RNGversion("2.7.0")
Warning in RNGkind("Mersenne-Twister", "Inversion", "Rounding") :
  non-uniform 'Rounding' sampler used
> ## first set the RNG kind, then simulate
> do.call(RNGkind, attr(sseed, "kind"))
> identical(S2, simulate(mod1, nsim = 200, seed = sseed))
[1] TRUE
> 
> ## Binomial GLM examples
> yb1 <- matrix(c(4, 4, 5, 7, 8, 6, 6, 5, 3, 2), ncol = 2)
> modb1 <- glm(yb1 ~ x, family = binomial)
> S3 <- simulate(modb1, nsim = 4)
> # each column of S3 is a two-column matrix.
> 
> x2 <- sort(runif(100))
> yb2 <- rbinom(100, prob = plogis(2*(x2-1)), size = 1)
> yb2 <- factor(1 + yb2, labels = c("failure", "success"))
> modb2 <- glm(yb2 ~ x2, family = binomial)
> S4 <- simulate(modb2, nsim = 4)
> # each column of S4 is a factor
> 
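> ## For an lm fit, simulate() draws responses around the fitted values
> ## using the estimated sigma; a sketch (not run) comparing the row-wise
> ## spread of S2 with sigma(mod1):
> ## Not run: 
> ##D apply(S2, 1, sd)  # each should be roughly sigma(mod1)
> ##D sigma(mod1)
> ## End(Not run)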
> 
> 
> cleanEx()
> nameEx("smooth")
> ### * smooth
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: smooth
> ### Title: Tukey's (Running Median) Smoothing
> ### Aliases: smooth
> ### Keywords: robust smooth
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## see also   demo(smooth) !
> 
> x1 <- c(4, 1, 3, 6, 6, 4, 1, 6, 2, 4, 2) # very artificial
> (x3R <- smooth(x1, "3R")) # 2 iterations of "3"
3R Tukey smoother resulting from  smooth(x = x1, kind = "3R") 
 used 2 iterations
 [1] 3 3 3 6 6 4 4 4 2 2 2
> smooth(x3R, kind = "S")
S Tukey smoother resulting from  smooth(x = x3R, kind = "S") 
 changed
 [1] 3 3 3 3 4 4 4 4 2 2 2
> 
> sm.3RS <- function(x, ...)
+    smooth(smooth(x, "3R", ...), "S", ...)
> 
> y <- c(1, 1, 19:1)
> plot(y, main = "misbehaviour of \"3RSR\"", col.main = 3)
> lines(sm.3RS(y))
> lines(smooth(y))
> lines(smooth(y, "3RSR"), col = 3, lwd = 2)  # the horror
> 
> x <- c(8:10, 10, 0, 0, 9, 9)
> plot(x, main = "breakdown of  3R  and  S  and hence  3RSS")
> matlines(cbind(smooth(x, "3R"), smooth(x, "S"), smooth(x, "3RSS"), smooth(x)))
> 
> presidents[is.na(presidents)] <- 0 # silly
> summary(sm3 <- smooth(presidents, "3R"))
3R Tukey smoother resulting from
 smooth(x = presidents, kind = "3R") ;  n = 120 
 used 4 iterations
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
    0.0    44.0    57.0    54.2    71.0    82.0 
> summary(sm2 <- smooth(presidents,"3RSS"))
3RSS Tukey smoother resulting from
 smooth(x = presidents, kind = "3RSS") ;  n = 120 
 used 5 iterations
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
   0.00   44.00   57.00   55.45   69.00   82.00 
> summary(sm  <- smooth(presidents))
3RS3R Tukey smoother resulting from
 smooth(x = presidents) ;  n = 120 
 used 7 iterations
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
  24.00   44.00   57.00   55.88   69.00   82.00 
> 
> all.equal(c(sm2), c(smooth(smooth(sm3, "S"), "S")))  # 3RSS  === 3R S S
[1] TRUE
> all.equal(c(sm),  c(smooth(smooth(sm3, "S"), "3R"))) # 3RS3R === 3R S 3R
[1] TRUE
> 
> plot(presidents, main = "smooth(presidents0, *) :  3R and default 3RS3R")
> lines(sm3, col = 3, lwd = 1.5)
> lines(sm, col = 2, lwd = 1.25)
> 
> 
> 
> cleanEx()
> nameEx("smooth.spline")
> ### * smooth.spline
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: smooth.spline
> ### Title: Fit a Smoothing Spline
> ### Aliases: smooth.spline .nknots.smspl
> ### Keywords: smooth
> 
> ### ** Examples
> 
> require(graphics)
> plot(dist ~ speed, data = cars, main = "data(cars)  &  smoothing splines")
> cars.spl <- with(cars, smooth.spline(speed, dist))
> cars.spl
Call:
smooth.spline(x = speed, y = dist)

Smoothing Parameter  spar= 0.7801305  lambda= 0.1112206 (11 iterations)
Equivalent Degrees of Freedom (Df): 2.635278
Penalized Criterion (RSS): 4187.776
GCV: 244.1044
> ## This example has duplicate points, so avoid cv = TRUE
> ## Don't show: 
>   stopifnot(cars.spl $ w == table(cars$speed)) # weights = multiplicities
>   utils::str(cars.spl, digits.d = 5, vec.len = 6)
List of 21
 $ x         : num [1:19] 4 7 8 9 10 11 12 13 14 15 16 17 18 19 20 ...
 $ y         : num [1:19] 1.6578 11.6829 15.0644 18.4823 21.9471 25.4656 29.0505 32.7076 ...
 $ w         : num [1:19] 2 2 1 1 3 2 4 4 4 3 2 3 4 3 5 ...
 $ yin       : num [1:19] 6 13 16 10 26 22.5 21.5 35 ...
 $ tol       : num 7e-06
 $ data      :List of 3
  ..$ x: num [1:50] 4 4 7 7 8 9 10 10 10 11 11 12 12 12 12 ...
  ..$ y: num [1:50] 2 10 4 22 16 10 18 26 34 17 28 14 20 24 28 ...
  ..$ w: num 1
 $ no.weights: logi TRUE
 $ n         : int 50
 $ lev       : num [1:19] 0.399342 0.179105 0.069771 0.055561 0.136721 0.077539 0.137252 0.126354 ...
 $ cv        : logi FALSE
 $ cv.crit   : num 244.1
 $ pen.crit  : num 4187.8
 $ crit      : num 244.1
 $ df        : num 2.6353
 $ spar      : num 0.78013
 $ ratio     : num 6.576e-05
 $ lambda    : num 0.11122
 $ iparms    : Named int [1:5] 1 0 11 0 NA
  ..- attr(*, "names")= chr [1:5] "icrit" "ispar" "iter" "" "errorI"
 $ auxM      : NULL
 $ fit       :List of 5
  ..$ knot : num [1:25] 0 0 0 0 0.14286 0.19048 0.2381 0.28571 ...
  ..$ nk   : int 21
  ..$ min  : num 4
  ..$ range: num 21
  ..$ coef : num [1:21] 1.6578 4.9869 9.4256 15.0584 18.4743 21.9384 25.4544 29.0377 ...
  ..- attr(*, "class")= chr "smooth.spline.fit"
 $ call      : language smooth.spline(x = speed, y = dist)
 - attr(*, "class")= chr "smooth.spline"
>   cars.spl$fit
$knot
 [1] 0.0000000 0.0000000 0.0000000 0.0000000 0.1428571 0.1904762 0.2380952
 [8] 0.2857143 0.3333333 0.3809524 0.4285714 0.4761905 0.5238095 0.5714286
[15] 0.6190476 0.6666667 0.7142857 0.7619048 0.8571429 0.9047619 0.9523810
[22] 1.0000000 1.0000000 1.0000000 1.0000000

$nk
[1] 21

$min
[1] 4

$range
[1] 21

$coef
 [1]  1.657809  4.986889  9.425578 15.058364 18.474307 21.938439 25.454405
 [8] 29.037679 32.697985 36.415692 40.180057 44.035086 48.004725 52.097325
[15] 56.299736 62.096055 68.215320 74.524063 79.313459 82.506943 84.103688

attr(,"class")
[1] "smooth.spline.fit"
> ## End(Don't show)
> lines(cars.spl, col = "blue")
> ss10 <- smooth.spline(cars[,"speed"], cars[,"dist"], df = 10)
> lines(ss10, lty = 2, col = "red")
> legend(5,120,c(paste("default [C.V.] => df =",round(cars.spl$df,1)),
+                "s( * , df = 10)"), col = c("blue","red"), lty = 1:2,
+        bg = 'bisque')
> 
> 
> ## Residual (Tukey Anscombe) plot:
> plot(residuals(cars.spl) ~ fitted(cars.spl))
> abline(h = 0, col = "gray")
> 
> ## consistency check:
> stopifnot(all.equal(cars$dist,
+                     fitted(cars.spl) + residuals(cars.spl)))
> ## The chosen inner knots in original x-scale :
> with(cars.spl$fit, min + range * knot[-c(1:3, nk+1 +1:3)]) # == unique(cars$speed)
 [1]  4  7  8  9 10 11 12 13 14 15 16 17 18 19 20 22 23 24 25
> 
> ## Visualize the behavior of  .nknots.smspl()
> nKnots <- Vectorize(.nknots.smspl) ; c.. <- adjustcolor("gray20",.5)
> curve(nKnots, 1, 250, n=250)
> abline(0,1, lty=2, col=c..); text(90,90,"y = x", col=c.., adj=-.25)
> abline(h=100,lty=2); abline(v=200, lty=2)
> 
> n <- c(1:799, seq(800, 3490, by=10), seq(3500, 10000, by = 50))
> plot(n, nKnots(n), type="l", main = "Vectorize(.nknots.smspl) (n)")
> abline(0,1, lty=2, col=c..); text(180,180,"y = x", col=c..)
> n0 <- c(50, 200, 800, 3200); c0 <- adjustcolor("blue3", .5)
> lines(n0, nKnots(n0), type="h", col=c0)
> axis(1, at=n0, line=-2, col.ticks=c0, col=NA, col.axis=c0)
> axis(4, at=.nknots.smspl(10000), line=-.5, col=c..,col.axis=c.., las=1)
> 
> ##-- artificial example
> y18 <- c(1:3, 5, 4, 7:3, 2*(2:5), rep(10, 4))
> xx  <- seq(1, length(y18), length.out = 201)
> (s2   <- smooth.spline(y18)) # GCV
Call:
smooth.spline(x = y18)

Smoothing Parameter  spar= 0.3928105  lambda= 9.672776e-05 (13 iterations)
Equivalent Degrees of Freedom (Df): 8.494168
Penalized Criterion (RSS): 3.59204
GCV: 0.7155391
> (s02  <- smooth.spline(y18, spar = 0.2))
Call:
smooth.spline(x = y18, spar = 0.2)

Smoothing Parameter  spar= 0.2  lambda= 3.911187e-06
Equivalent Degrees of Freedom (Df): 15.259
Penalized Criterion (RSS): 0.4973656
GCV: 1.191602
> (s02. <- smooth.spline(y18, spar = 0.2, cv = NA))
Call:
smooth.spline(x = y18, spar = 0.2, cv = NA)

Smoothing Parameter  spar= 0.2  lambda= 3.911187e-06
Equivalent Degrees of Freedom (Df): NA
Penalized Criterion (RSS): 0.4973656
> plot(y18, main = deparse(s2$call), col.main = 2)
> lines(s2, col = "gray"); lines(predict(s2, xx), col = 2)
> lines(predict(s02, xx), col = 3); mtext(deparse(s02$call), col = 3)
> 
> ## Specifying 'lambda' instead of usual spar :
> (s2. <- smooth.spline(y18, lambda = s2$lambda, tol = s2$tol))
Call:
smooth.spline(x = y18, lambda = s2$lambda, tol = s2$tol)

Smoothing Parameter  spar= NA  lambda= 9.672776e-05
Equivalent Degrees of Freedom (Df): 8.494168
Penalized Criterion (RSS): 3.59204
GCV: 0.7155391
> 
> ## Don't show: 
> stopifnot(identical(
+     with(s2$fit, min + range * knot[-c(1:3, nk+1+1:3)]),
+     as.numeric(1:18)),
+     with(cars.spl$fit, min + range * knot[-c(1:3, nk+1+1:3)]) == unique(cars$speed))
> 
> nD <- c("spar", "ratio", "iparms", "call"); nn <- setdiff(names(s2), nD)
> stopifnot(all.equal(s2[nn], s2.[nn], tolerance = 7e-7), # seen 6.86e-8
+           all.equal(predict(s02 , xx),
+ 		    predict(s02., xx), tolerance = 1e-15))
> ## End(Don't show)
> 
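> ## A sketch (not run): predict() on a "smooth.spline" fit can also
> ## evaluate derivatives of the fitted curve via its 'deriv' argument:
> ## Not run: 
> ##D predict(cars.spl, x = seq(5, 25, by = 5), deriv = 1)
> ## End(Not run)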
> 
> cleanEx()
> nameEx("smoothEnds")
> ### * smoothEnds
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: smoothEnds
> ### Title: End Points Smoothing (for Running Medians)
> ### Aliases: smoothEnds
> ### Keywords: smooth robust
> 
> ### ** Examples
> 
> require(graphics)
> 
> y <- ys <- (-20:20)^2
> y [c(1,10,21,41)] <-  c(100, 30, 400, 470)
> s7k <- runmed(y, 7, endrule = "keep")
> s7. <- runmed(y, 7, endrule = "const")
> s7m <- runmed(y, 7)
> col3 <- c("midnightblue","blue","steelblue")
> plot(y, main = "Running Medians -- runmed(*, k=7, endrule = X)")
> lines(ys, col = "light gray")
> matlines(cbind(s7k, s7.,s7m), lwd = 1.5, lty = 1, col = col3)
> eRules <- c("keep","constant","median")
> legend("topleft", paste("endrule", eRules, sep = " = "),
+        col = col3, lwd = 1.5, lty = 1, bty = "n")
> 
> stopifnot(identical(s7m, smoothEnds(s7k, 7)))
> 
> ## With missing values (for R >= 3.6.1):
> yN <- y; yN[c(2,40)] <- NA
> rN <- sapply(eRules, function(R) runmed(yN, 7, endrule=R))
> matlines(rN, type = "b", pch = 4, lwd = 3, lty=2,
+          col = adjustcolor(c("red", "orange4", "orange1"), 0.5))
> yN[c(1, 20:21)] <- NA # additionally
> rN. <- sapply(eRules, function(R) runmed(yN, 7, endrule=R))
> head(rN., 4); tail(rN.) # more NA's too, still not *so* many:
     keep constant median
[1,]   NA      256    392
[2,]   NA      256    324
[3,]  324      256    256
[4,]  256      256    256
      keep constant median
[36,]  225      225    225
[37,]  256      256    256
[38,]  289      289    289
[39,]  324      289    289
[40,]   NA      289    397
[41,]  470      289    470
> stopifnot(exprs = {
+    !anyNA(rN[,2:3])
+    identical(which(is.na(rN[,"keep"])), c(2L, 40L))
+    identical(which(is.na(rN.), arr.ind=TRUE, useNames=FALSE),
+              cbind(c(1:2,40L), 1L))
+    identical(rN.[38:41, "median"], c(289,289, 397, 470))
+ })
> 
> 
> 
> cleanEx()
> nameEx("sortedXyData")
> ### * sortedXyData
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: sortedXyData
> ### Title: Create a 'sortedXyData' Object
> ### Aliases: sortedXyData sortedXyData.default
> ### Keywords: manip
> 
> ### ** Examples
> 
> DNase.2 <- DNase[ DNase$Run == "2", ]
> sortedXyData( expression(log(conc)), expression(density), DNase.2 )
           x      y
1 -3.0194489 0.0475
2 -1.6331544 0.1300
3 -0.9400073 0.2160
4 -0.2468601 0.3920
5  0.4462871 0.6765
6  1.1394343 1.0970
7  1.8325815 1.5400
8  2.5257286 1.9230
> 
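> ## sortedXyData objects feed the NLSst*() initial-value helpers; a
> ## sketch (not run) using the data above:
> ## Not run: 
> ##D sxy <- sortedXyData(expression(log(conc)), expression(density), DNase.2)
> ##D NLSstClosestX(sxy, 0.5)  # x where y is closest to 0.5
> ##D NLSstRtAsymptote(sxy)    # estimate of the right-side asymptote
> ## End(Not run)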
> 
> 
> cleanEx()
> nameEx("spec.ar")
> ### * spec.ar
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: spec.ar
> ### Title: Estimate Spectral Density of a Time Series from AR Fit
> ### Aliases: spec.ar
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> spec.ar(lh)
> 
> spec.ar(ldeaths)
> spec.ar(ldeaths, method = "burg")
> 
> spec.ar(log(lynx))
> spec.ar(log(lynx), method = "burg", add = TRUE, col = "purple")
> spec.ar(log(lynx), method = "mle", add = TRUE, col = "forest green")
> spec.ar(log(lynx), method = "ols", add = TRUE, col = "blue")
> 
> 
> 
> cleanEx()
> nameEx("spec.pgram")
> ### * spec.pgram
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: spec.pgram
> ### Title: Estimate Spectral Density of a Time Series by a Smoothed
> ###   Periodogram
> ### Aliases: spec.pgram
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## Examples from Venables & Ripley
> spectrum(ldeaths)
> spectrum(ldeaths, spans = c(3,5))
> spectrum(ldeaths, spans = c(5,7))
> spectrum(mdeaths, spans = c(3,3))
> spectrum(fdeaths, spans = c(3,3))
> 
> ## bivariate example
> mfdeaths.spc <- spec.pgram(ts.union(mdeaths, fdeaths), spans = c(3,3))
> # plots marginal spectra: now plot coherency and phase
> plot(mfdeaths.spc, plot.type = "coherency")
> plot(mfdeaths.spc, plot.type = "phase")
> 
> ## now impose a lack of alignment
> mfdeaths.spc <- spec.pgram(ts.intersect(mdeaths, lag(fdeaths, 4)),
+    spans = c(3,3), plot = FALSE)
> plot(mfdeaths.spc, plot.type = "coherency")
> plot(mfdeaths.spc, plot.type = "phase")
> 
> stocks.spc <- spectrum(EuStockMarkets, kernel("daniell", c(30,50)),
+                        plot = FALSE)
> plot(stocks.spc, plot.type = "marginal") # the default type
> plot(stocks.spc, plot.type = "coherency")
> plot(stocks.spc, plot.type = "phase")
> 
> sales.spc <- spectrum(ts.union(BJsales, BJsales.lead),
+                       kernel("modified.daniell", c(5,7)))
> plot(sales.spc, plot.type = "coherency")
> plot(sales.spc, plot.type = "phase")
> 
> 
> 
> cleanEx()
> nameEx("spectrum")
> ### * spectrum
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: spectrum
> ### Title: Spectral Density Estimation
> ### Aliases: spectrum spec
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> ## Examples from Venables & Ripley
> ## spec.pgram
> par(mfrow = c(2,2))
> spectrum(lh)
> spectrum(lh, spans = 3)
> spectrum(lh, spans = c(3,3))
> spectrum(lh, spans = c(3,5))
> 
> spectrum(ldeaths)
> spectrum(ldeaths, spans = c(3,3))
> spectrum(ldeaths, spans = c(3,5))
> spectrum(ldeaths, spans = c(5,7))
> spectrum(ldeaths, spans = c(5,7), log = "dB", ci = 0.8)
> 
> # for multivariate examples see the help for spec.pgram
> 
> ## spec.ar
> spectrum(lh, method = "ar")
> spectrum(ldeaths, method = "ar")
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("splinefun")
> ### * splinefun
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: splinefun
> ### Title: Interpolating Splines
> ### Aliases: spline splinefun splinefunH
> ### Keywords: math dplot
> 
> ### ** Examples
> 
> require(graphics)
> 
> op <- par(mfrow = c(2,1), mgp = c(2,.8,0), mar = 0.1+c(3,3,3,1))
> n <- 9
> x <- 1:n
> y <- rnorm(n)
> plot(x, y, main = paste("spline[fun](.) through", n, "points"))
> lines(spline(x, y))
> lines(spline(x, y, n = 201), col = 2)
> 
> y <- (x-6)^2
> plot(x, y, main = "spline(.) -- 3 methods")
> lines(spline(x, y, n = 201), col = 2)
> lines(spline(x, y, n = 201, method = "natural"), col = 3)
> lines(spline(x, y, n = 201, method = "periodic"), col = 4)
Warning in spline(x, y, n = 201, method = "periodic") :
  spline: first and last y values differ - using y[1] for both
> legend(6, 25, c("fmm","natural","periodic"), col = 2:4, lty = 1)
> 
> y <- sin((x-0.5)*pi)
> f <- splinefun(x, y)
> ls(envir = environment(f))
[1] "z"
> splinecoef <- get("z", envir = environment(f))
> curve(f(x), 1, 10, col = "green", lwd = 1.5)
> points(splinecoef, col = "purple", cex = 2)
> curve(f(x, deriv = 1), 1, 10, col = 2, lwd = 1.5)
> curve(f(x, deriv = 2), 1, 10, col = 2, lwd = 1.5, n = 401)
> curve(f(x, deriv = 3), 1, 10, col = 2, lwd = 1.5, n = 401)
> par(op)
> 
> ## Manual spline evaluation --- demo the coefficients :
> .x <- splinecoef$x
> u <- seq(3, 6, by = 0.25)
> (ii <- findInterval(u, .x))
 [1] 3 3 3 3 4 4 4 4 5 5 5 5 6
> dx <- u - .x[ii]
> f.u <- with(splinecoef,
+             y[ii] + dx*(b[ii] + dx*(c[ii] + dx* d[ii])))
> stopifnot(all.equal(f(u), f.u))
> 
> ## An example with ties (non-unique  x values):
> set.seed(1); x <- round(rnorm(30), 1); y <- sin(pi * x) + rnorm(30)/10
> plot(x, y, main = "spline(x,y)  when x has ties")
> lines(spline(x, y, n = 201), col = 2)
Warning in regularize.values(x, y, ties, missing(ties)) :
  collapsing to unique 'x' values
> ## visualizes the non-unique ones:
> tx <- table(x); mx <- as.numeric(names(tx[tx > 1]))
> ry <- matrix(unlist(tapply(y, match(x, mx), range, simplify = FALSE)),
+              ncol = 2, byrow = TRUE)
> segments(mx, ry[, 1], mx, ry[, 2], col = "blue", lwd = 2)
> 
> ## Another example with sorted x, but ties:
> set.seed(8); x <- sort(round(rnorm(30), 1)); y <- round(sin(pi * x) + rnorm(30)/10, 3)
> summary(diff(x) == 0) # -> 7 duplicated x-values
   Mode   FALSE    TRUE 
logical      22       7 
> str(spline(x, y, n = 201, ties="ordered")) # all '$y' entries are NaN
List of 2
 $ x: num [1:201] -3 -2.98 -2.95 -2.92 -2.9 ...
 $ y: num [1:201] NaN NaN NaN NaN NaN NaN NaN NaN NaN NaN ...
> ## The default (ties = mean) is ok, but the most efficient choice here is
> sxyo <- spline(x, y, n = 201, ties= list("ordered", mean))
> sapply(sxyo, summary)# all fine now
            x           y
Min.    -3.00 -1.07800000
1st Qu. -1.75 -0.57050000
Median  -0.50 -0.13613265
Mean    -0.50 -0.08208311
3rd Qu.  0.75  0.50611122
Max.     2.00  1.03072752
> plot(x, y, main = "spline(x,y, ties=list(\"ordered\", mean))  for when x has ties")
> lines(sxyo, col="blue")
> 
> ## An example of monotone interpolation
> n <- 20
> set.seed(11)
> x. <- sort(runif(n)) ; y. <- cumsum(abs(rnorm(n)))
> plot(x., y.)
> curve(splinefun(x., y.)(x), add = TRUE, col = 2, n = 1001)
> curve(splinefun(x., y., method = "monoH.FC")(x), add = TRUE, col = 3, n = 1001)
> curve(splinefun(x., y., method = "hyman")   (x), add = TRUE, col = 4, n = 1001)
> legend("topleft",
+        paste0("splinefun( \"", c("fmm", "monoH.FC", "hyman"), "\" )"),
+        col = 2:4, lty = 1, bty = "n")
> 
> ## and one from Fritsch and Carlson (1980), Dougherty et al (1989)
> x. <- c(7.09, 8.09, 8.19, 8.7, 9.2, 10, 12, 15, 20)
> f <- c(0, 2.76429e-5, 4.37498e-2, 0.169183, 0.469428, 0.943740,
+        0.998636, 0.999919, 0.999994)
> s0 <- splinefun(x., f)
> s1 <- splinefun(x., f, method = "monoH.FC")
> s2 <- splinefun(x., f, method = "hyman")
> plot(x., f, ylim = c(-0.2, 1.2))
> curve(s0(x), add = TRUE, col = 2, n = 1001) -> m0
> curve(s1(x), add = TRUE, col = 3, n = 1001)
> curve(s2(x), add = TRUE, col = 4, n = 1001)
> legend("right",
+        paste0("splinefun( \"", c("fmm", "monoH.FC", "hyman"), "\" )"),
+        col = 2:4, lty = 1, bty = "n")
> 
> ## they seem identical, but are not quite:
> xx <- m0$x
> plot(xx, s1(xx) - s2(xx), type = "l",  col = 2, lwd = 2,
+      main = "Difference   monoH.FC - hyman"); abline(h = 0, lty = 3)
> 
> x <- xx[xx < 10.2] ## full range: x <- xx .. does not show enough
> ccol <- adjustcolor(2:4, 0.8)
> matplot(x, cbind(s0(x, deriv = 2), s1(x, deriv = 2), s2(x, deriv = 2))^2,
+         lwd = 2, col = ccol, type = "l", ylab = quote({{f*second}(x)}^2),
+         main = expression({{f*second}(x)}^2 ~" for the three 'splines'"))
> legend("topright",
+        paste0("splinefun( \"", c("fmm", "monoH.FC", "hyman"), "\" )"),
+        lwd = 2, col  =  ccol, lty = 1:3, bty = "n")
> ## --> "hyman" has slightly smaller  Integral f''(x)^2 dx  than "FC",
> ## here, and both are 'much worse' than the regular fmm spline.
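> ## Not run: 
> ##D ## a rough numerical version of these integrals (sketch only; the
> ##D ## integrand is merely piecewise smooth, so treat values as approximate):
> ##D sapply(list(fmm = s0, monoH.FC = s1, hyman = s2), function(s)
> ##D        integrate(function(u) s(u, deriv = 2)^2, 7.09, 10.2)$value)
> ## End(Not run)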
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("stat.anova")
> ### * stat.anova
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: stat.anova
> ### Title: GLM ANOVA Statistics
> ### Aliases: stat.anova
> ### Keywords: regression models
> 
> ### ** Examples
> 
> ##-- Continued from '?glm':
> ## Don't show: 
> utils::example("glm", echo = FALSE)
> ## End(Don't show)
> print(ag <- anova(glm.D93))
Analysis of Deviance Table

Model: poisson, link: log

Response: counts

Terms added sequentially (first to last)


          Df Deviance Resid. Df Resid. Dev Pr(>Chi)  
NULL                          8    10.5814           
outcome    2   5.4523         6     5.1291  0.06547 .
treatment  2   0.0000         4     5.1291  1.00000  
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> stat.anova(ag$table, test = "Cp",
+            scale = sum(resid(glm.D93, "pearson")^2)/4,
+            df.scale = 4, n = 9)
     table Cp
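> ## (NB: 'ag' is an "anova" data frame with no 'table' component, so
> ##  ag$table is NULL here and the result above is an empty table)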
> 
> 
> 
> cleanEx()
> nameEx("step")
> ### * step
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: step
> ### Title: Choose a model by AIC in a Stepwise Algorithm
> ### Aliases: step
> ### Keywords: models
> 
> ### ** Examples
> 
> 
> cleanEx()
> nameEx("stepfun")
> ### * stepfun
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: stepfun
> ### Title: Step Functions - Creation and Class
> ### Aliases: stepfun is.stepfun as.stepfun print.stepfun summary.stepfun
> ###   knots
> ### Keywords: dplot
> 
> ### ** Examples
> 
> y0 <- c(1., 2., 4., 3.)
> sfun0  <- stepfun(1:3, y0, f = 0)
> sfun.2 <- stepfun(1:3, y0, f = 0.2)
> sfun1  <- stepfun(1:3, y0, f = 1)
> sfun1c <- stepfun(1:3, y0, right = TRUE) # hence f=1
> sfun0
Step function
Call: stepfun(1:3, y0, f = 0)
 x[1:3] =      1,      2,      3
4 plateau levels =      1,      2,      4,      3
> summary(sfun0)
Step function with continuity 'f'= 0 ,  3 knots at
[1] 1 2 3
  and	4 plateau levels (y) at
[1] 1 2 4 3
> summary(sfun.2)
Step function with continuity 'f'= 0.2 ,  3 knots at
[1] 1 2 3
  and	4 plateau levels (y) at
[1] 1 2 4 3
> 
> ## look at the internal structure:
> unclass(sfun0)
function (v) 
.approxfun(x, y, v, method, yleft, yright, f, na.rm)
<bytecode: 0x115d97208>
<environment: 0x106858c98>
attr(,"call")
stepfun(1:3, y0, f = 0)
> ls(envir = environment(sfun0))
[1] "f"      "method" "na.rm"  "x"      "y"      "yleft"  "yright"
> 
> x0 <- seq(0.5, 3.5, by = 0.25)
> rbind(x = x0, f.f0 = sfun0(x0), f.f02 = sfun.2(x0),
+       f.f1 = sfun1(x0), f.f1c = sfun1c(x0))
      [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10] [,11] [,12] [,13]
x      0.5 0.75    1 1.25  1.5 1.75    2 2.25  2.5  2.75     3  3.25   3.5
f.f0   1.0 1.00    2 2.00  2.0 2.00    4 4.00  4.0  4.00     3  3.00   3.0
f.f02  1.0 1.00    2 2.40  2.4 2.40    4 3.80  3.8  3.80     3  3.00   3.0
f.f1   1.0 1.00    2 4.00  4.0 4.00    4 3.00  3.0  3.00     3  3.00   3.0
f.f1c  1.0 1.00    1 2.00  2.0 2.00    2 4.00  4.0  4.00     4  3.00   3.0
> ## Identities :
> stopifnot(identical(y0[-1], sfun0 (1:3)), # right = FALSE
+           identical(y0[-4], sfun1c(1:3))) # right = TRUE
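> ## Between two knots the value is the convex combination
> ##    (1-f) * yL + f * yR
> ## of the neighbouring plateau levels, where yL is the plateau used when
> ## f = 0 and yR the next one; e.g. for sfun.2 on (1,2):
> ##    (1-0.2)*2 + 0.2*4 == 2.4,   as in the 'f.f02' row above.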
> 
> 
> 
> cleanEx()
> nameEx("stl")
> ### * stl
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: stl
> ### Title: Seasonal Decomposition of Time Series by Loess
> ### Aliases: stl
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> plot(stl(nottem, "per"))
> plot(stl(nottem, s.window = 7, t.window = 50, t.jump = 1))
> 
> plot(stllc <- stl(log(co2), s.window = 21))
> summary(stllc)
 Call:
 stl(x = log(co2), s.window = 21)

 Time.series components:
    seasonal              trend         remainder         
 Min.   :-9.939e-03   Min.   :5.754   Min.   :-2.255e-03  
 1st Qu.:-4.537e-03   1st Qu.:5.779   1st Qu.:-4.587e-04  
 Median : 8.778e-04   Median :5.815   Median :-8.867e-06  
 Mean   :-1.304e-06   Mean   :5.819   Mean   :-1.966e-06  
 3rd Qu.: 4.998e-03   3rd Qu.:5.860   3rd Qu.: 4.023e-04  
 Max.   : 9.115e-03   Max.   :5.899   Max.   : 1.940e-03  
 IQR:
     STL.seasonal STL.trend STL.remainder data    
     0.009534     0.081250  0.000861      0.079370
   %  12.0        102.4       1.1         100.0   

 Weights: all == 1

 Other components: List of 5
 $ win  : Named num [1:3] 21 21 13
 $ deg  : Named int [1:3] 0 1 1
 $ jump : Named num [1:3] 3 3 2
 $ inner: int 2
 $ outer: int 0
> ## linear trend, strict period.
> plot(stl(log(co2), s.window = "per", t.window = 1000))
> 
> ## Two STL decompositions plotted side by side :
>         stmd <- stl(mdeaths, s.window = "per") # non-robust
> summary(stmR <- stl(mdeaths, s.window = "per", robust = TRUE))
 Call:
 stl(x = mdeaths, s.window = "per", robust = TRUE)

 Time.series components:
    seasonal           trend        remainder       
 Min.   :-446.83   Min.   :1319   Min.   :-314.584  
 1st Qu.:-301.57   1st Qu.:1432   1st Qu.: -32.539  
 Median : -79.06   Median :1449   Median :   5.794  
 Mean   :   0.00   Mean   :1473   Mean   :  23.065  
 3rd Qu.: 304.57   3rd Qu.:1549   3rd Qu.:  47.513  
 Max.   : 544.79   Max.   :1616   Max.   : 872.199  
 IQR:
     STL.seasonal STL.trend STL.remainder data  
     606.14       116.77     80.05        707.75
   %  85.6         16.5      11.3         100.0 

 Weights:
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max. 
 0.0000  0.5594  0.9452  0.7312  0.9853  1.0000 

 Other components: List of 5
 $ win  : Named num [1:3] 721 19 13
 $ deg  : Named int [1:3] 0 1 1
 $ jump : Named num [1:3] 73 2 2
 $ inner: int 1
 $ outer: int 15
> op <- par(mar = c(0, 4, 0, 3), oma = c(5, 0, 4, 0), mfcol = c(4, 2))
> plot(stmd, set.pars = NULL, labels  =  NULL,
+      main = "stl(mdeaths, s.w = \"per\",  robust = FALSE / TRUE )")
> plot(stmR, set.pars = NULL)
> # mark the 'outliers' :
> (iO <- which(stmR $ weights  < 1e-8)) # 10 were considered outliers
 [1] 24 26 27 28 36 37 50 52 59 61
> sts <- stmR$time.series
> points(time(sts)[iO], 0.8* sts[,"remainder"][iO], pch = 4, col = "red")
> par(op)   # reset
> 
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("summary.aov")
> ### * summary.aov
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: summary.aov
> ### Title: Summarize an Analysis of Variance Model
> ### Aliases: summary.aov summary.aovlist print.summary.aov
> ###   print.summary.aovlist
> ### Keywords: models regression
> 
> ### ** Examples
> 
> ## For a simple example see example(aov)
> 
> # Cochran and Cox (1957, p.164)
> # 3x3 factorial with ordered factors; each response is the average of 12 observations.
> CC <- data.frame(
+     y = c(449, 413, 326, 409, 358, 291, 341, 278, 312)/12,
+     P = ordered(gl(3, 3)), N = ordered(gl(3, 1, 9))
+ )
> CC.aov <- aov(y ~ N * P, data = CC , weights = rep(12, 9))
> summary(CC.aov)
            Df Sum Sq Mean Sq
N            2 1016.7   508.3
P            2  917.4   458.7
N:P          4  399.3    99.8
> 
> # Split both main effects into linear and quadratic parts.
> summary(CC.aov, split = list(N = list(L = 1, Q = 2),
+                              P = list(L = 1, Q = 2)))
            Df Sum Sq Mean Sq
N            2 1016.7   508.3
  N: L       1 1012.5  1012.5
  N: Q       1    4.2     4.2
P            2  917.4   458.7
  P: L       1  917.3   917.3
  P: Q       1    0.0     0.0
N:P          4  399.3    99.8
  N:P: L.L   1  184.1   184.1
  N:P: Q.L   1  152.1   152.1
  N:P: L.Q   1   49.0    49.0
  N:P: Q.Q   1   14.1    14.1
> 
> # Split only the interaction
> summary(CC.aov, split = list("N:P" = list(L.L = 1, Q = 2:4)))
            Df Sum Sq Mean Sq
N            2 1016.7   508.3
P            2  917.4   458.7
N:P          4  399.3    99.8
  N:P: L.L   1  184.1   184.1
  N:P: Q     3  215.2    71.7
> 
> # split on just one var
> summary(CC.aov, split = list(P = list(lin = 1, quad = 2)))
            Df Sum Sq Mean Sq
N            2 1016.7   508.3
P            2  917.4   458.7
  P: lin     1  917.3   917.3
  P: quad    1    0.0     0.0
N:P          4  399.3    99.8
  N:P: lin   2  336.2   168.1
  N:P: quad  2   63.1    31.5
> summary(CC.aov, split = list(P = list(lin = 1, quad = 2)),
+         expand.split = FALSE)
            Df Sum Sq Mean Sq
N            2 1016.7   508.3
P            2  917.4   458.7
  P: lin     1  917.3   917.3
  P: quad    1    0.0     0.0
N:P          4  399.3    99.8
> 
> 
> cleanEx()
> nameEx("summary.glm")
> ### * summary.glm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: summary.glm
> ### Title: Summarizing Generalized Linear Model Fits
> ### Aliases: summary.glm print.summary.glm
> ### Keywords: models regression
> 
> ### ** Examples
> 
> ## For examples see example(glm)
> 
> 
> 
> cleanEx()
> nameEx("summary.lm")
> ### * summary.lm
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: summary.lm
> ### Title: Summarizing Linear Model Fits
> ### Aliases: summary.lm summary.mlm print.summary.lm
> ### Keywords: regression models
> 
> ### ** Examples
> 
> ## Don't show: 
> utils::example("lm", echo = FALSE)
> ## End(Don't show)
> ##-- Continuing the  lm(.) example:
> coef(lm.D90)  # the bare coefficients
groupCtl groupTrt 
   5.032    4.661 
> sld90 <- summary(lm.D90 <- lm(weight ~ group -1))  # omitting intercept
> sld90

Call:
lm(formula = weight ~ group - 1)

Residuals:
    Min      1Q  Median      3Q     Max 
-1.0710 -0.4938  0.0685  0.2462  1.3690 

Coefficients:
         Estimate Std. Error t value Pr(>|t|)    
groupCtl   5.0320     0.2202   22.85 9.55e-15 ***
groupTrt   4.6610     0.2202   21.16 3.62e-14 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.6964 on 18 degrees of freedom
Multiple R-squared:  0.9818,	Adjusted R-squared:  0.9798 
F-statistic: 485.1 on 2 and 18 DF,  p-value: < 2.2e-16

> coef(sld90)  # much more
         Estimate Std. Error  t value     Pr(>|t|)
groupCtl    5.032  0.2202177 22.85012 9.547128e-15
groupTrt    4.661  0.2202177 21.16542 3.615345e-14
> 
> ## model with *aliased* coefficient:
> lm.D9. <- lm(weight ~ group + I(group != "Ctl"))
> Sm.D9. <- summary(lm.D9.)
> Sm.D9. #  shows the NA NA NA NA  line

Call:
lm(formula = weight ~ group + I(group != "Ctl"))

Residuals:
    Min      1Q  Median      3Q     Max 
-1.0710 -0.4938  0.0685  0.2462  1.3690 

Coefficients: (1 not defined because of singularities)
                      Estimate Std. Error t value Pr(>|t|)    
(Intercept)             5.0320     0.2202  22.850 9.55e-15 ***
groupTrt               -0.3710     0.3114  -1.191    0.249    
I(group != "Ctl")TRUE       NA         NA      NA       NA    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.6964 on 18 degrees of freedom
Multiple R-squared:  0.07308,	Adjusted R-squared:  0.02158 
F-statistic: 1.419 on 1 and 18 DF,  p-value: 0.249

> stopifnot(length(cc <- coef(lm.D9.)) == 3, is.na(cc[3]),
+           dim(coef(Sm.D9.)) == c(2,4), Sm.D9.$df == c(2, 18, 3))
> 
> 
> 
> cleanEx()
> nameEx("summary.manova")
> ### * summary.manova
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: summary.manova
> ### Title: Summary Method for Multivariate Analysis of Variance
> ### Aliases: summary.manova print.summary.manova
> ### Keywords: models
> 
> ### ** Examples
> 
> 
> cleanEx()
> nameEx("summary.princomp")
> ### * summary.princomp
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: summary.princomp
> ### Title: Summary method for Principal Components Analysis
> ### Aliases: summary.princomp print.summary.princomp
> ### Keywords: multivariate
> 
> ### ** Examples
> 
> summary(pc.cr <- princomp(USArrests, cor = TRUE))
Importance of components:
                          Comp.1    Comp.2    Comp.3     Comp.4
Standard deviation     1.5748783 0.9948694 0.5971291 0.41644938
Proportion of Variance 0.6200604 0.2474413 0.0891408 0.04335752
Cumulative Proportion  0.6200604 0.8675017 0.9566425 1.00000000
> ## The signs of the loading columns are arbitrary
> print(summary(princomp(USArrests, cor = TRUE),
+               loadings = TRUE, cutoff = 0.2), digits = 2)
Importance of components:
                          Comp.1    Comp.2    Comp.3     Comp.4
Standard deviation     1.5748783 0.9948694 0.5971291 0.41644938
Proportion of Variance 0.6200604 0.2474413 0.0891408 0.04335752
Cumulative Proportion  0.6200604 0.8675017 0.9566425 1.00000000

Loadings:
         Comp.1 Comp.2 Comp.3 Comp.4
Murder    0.54   0.42   0.34   0.65 
Assault   0.58          0.27  -0.74 
UrbanPop  0.28  -0.87   0.38        
Rape      0.54         -0.82        
> 
> 
> 
> cleanEx()
> nameEx("supsmu")
> ### * supsmu
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: supsmu
> ### Title: Friedman's SuperSmoother
> ### Aliases: supsmu
> ### Keywords: smooth
> 
> ### ** Examples
> 
> require(graphics)
> 
> with(cars, {
+     plot(speed, dist)
+     lines(supsmu(speed, dist))
+     lines(supsmu(speed, dist, bass = 7), lty = 2)
+     })
> 
> 
> 
> cleanEx()
> nameEx("symnum")
> ### * symnum
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: symnum
> ### Title: Symbolic Number Coding
> ### Aliases: symnum
> ### Keywords: utilities character
> 
> ### ** Examples
> 
> ii <- setNames(0:8, 0:8)
> symnum(ii, cutpoints =  2*(0:4), symbols = c(".", "-", "+", "$"))
0 1 2 3 4 5 6 7 8 
. . . - - + + $ $ 
attr(,"legend")
[1] 0 ‘.’ 2 ‘-’ 4 ‘+’ 6 ‘$’ 8
> symnum(ii, cutpoints =  2*(0:4), symbols = c(".", "-", "+", "$"), show.max = TRUE)
0 1 2 3 4 5 6 7 8 
. . . - - + + $ 8 
attr(,"legend")
[1] 0 ‘.’ 2 ‘-’ 4 ‘+’ 6 ‘$’ 8
> 
> symnum(1:12 %% 3 == 0)  # --> "|" = TRUE, "." = FALSE  for logical
 [1] . . | . . | . . | . . |
> 
> ## Pascal's Triangle modulo 2 -- odd and even numbers:
> N <- 38
> pascal <- t(sapply(0:N, function(n) round(choose(n, 0:N - (N-n)%/%2))))
> rownames(pascal) <- rep("", 1+N) # <-- to improve "graphic"
> symnum(pascal %% 2, symbols = c(" ", "A"), numeric.x = FALSE)
                                                                              
                                       A                                      
                                     A A                                      
                                     A   A                                    
                                   A A A A                                    
                                   A       A                                  
                                 A A     A A                                  
                                 A   A   A   A                                
                               A A A A A A A A                                
                               A               A                              
                             A A             A A                              
                             A   A           A   A                            
                           A A A A         A A A A                            
                           A       A       A       A                          
                         A A     A A     A A     A A                          
                         A   A   A   A   A   A   A   A                        
                       A A A A A A A A A A A A A A A A                        
                       A                               A                      
                     A A                             A A                      
                     A   A                           A   A                    
                   A A A A                         A A A A                    
                   A       A                       A       A                  
                 A A     A A                     A A     A A                  
                 A   A   A   A                   A   A   A   A                
               A A A A A A A A                 A A A A A A A A                
               A               A               A               A              
             A A             A A             A A             A A              
             A   A           A   A           A   A           A   A            
           A A A A         A A A A         A A A A         A A A A            
           A       A       A       A       A       A       A       A          
         A A     A A     A A     A A     A A     A A     A A     A A          
         A   A   A   A   A   A   A   A   A   A   A   A   A   A   A   A        
       A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A A        
       A                                                               A      
     A A                                                             A A      
     A   A                                                           A   A    
   A A A A                                                         A A A A    
   A       A                                                       A       A  
 A A     A A                                                     A A     A A  
 A   A   A   A                                                   A   A   A   A
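> ## Not run: 
> ##D ## the Sierpinski-gasket pattern reflects Lucas' theorem: choose(n, k)
> ##D ## is odd iff the binary digits of k are a subset of those of n; sketch:
> ##D n <- 10
> ##D stopifnot((choose(n, 0:n) %% 2 == 1) == (bitwAnd(0:n, n) == 0:n))
> ## End(Not run)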
> 
> ##-- Symbolic correlation matrices:
> symnum(cor(attitude), diag.lower.tri = FALSE)
           rt cm p l rs cr a
rating                      
complaints +                
privileges .  .             
learning   ,  .  .          
raises     .  ,  . ,        
critical             .      
advance          . . .      
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> symnum(cor(attitude), abbr.colnames = NULL)
                        
rating     1            
complaints + 1          
privileges . . 1        
learning   , . . 1      
raises     . , . , 1    
critical           . 1  
advance        . . .   1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> symnum(cor(attitude), abbr.colnames = FALSE)
           rating complaints privileges learning raises critical advance
rating     1                                                            
complaints +      1                                                     
privileges .      .          1                                          
learning   ,      .          .          1                               
raises     .      ,          .          ,        1                      
critical                                         .      1               
advance                      .          .        .               1      
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> symnum(cor(attitude), abbr.colnames = 2)
           rt cm pr lr rs cr ad
rating     1                   
complaints +  1                
privileges .  .  1             
learning   ,  .  .  1          
raises     .  ,  .  ,  1       
critical               .  1    
advance          .  .  .     1 
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> 
> symnum(cor(rbind(1, rnorm(25), rnorm(25)^2)))
                                                       
 [1,] 1                                                
 [2,] + 1                                              
 [3,] .   1                                            
 [4,] .   B 1                                          
 [5,] + B     1                                        
 [6,] .   B B   1                                      
 [7,] , * . . *   1                                    
 [8,] . + . , + . B 1                                  
 [9,] . + . , * . B B 1                                
[10,] .   B B   B . . . 1                              
[11,]     B B . * . , , B 1                            
[12,] , B   . B   B + *   . 1                          
[13,] 1 + . . + . , . . .   , 1                        
[14,] , . * +   B       * +   , 1                      
[15,]   . * B . + , + , * B .   , 1                    
[16,] * B     B   + , ,     * * .   1                  
[17,] * B     B   + , ,     * * .   B 1                
[18,]   , , + , , * B * , + ,   . * . . 1              
[19,] . + , , + . B B B , + + . . + , , B 1            
[20,] + B     B   B + *   . B +   . B B , + 1          
[21,]   , , + , , * B B , + +   . * . . B B , 1        
[22,] . , , , + . B B B , + + . . + , , B B + B 1      
[23,] B , , . , , .   . , . , B +   + +     ,     1    
[24,] * , , . , , .     , . . * * . , ,     .     B 1  
[25,] B + . . + . , . . .   , B ,   * *   . +   . B B 1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> symnum(cor(matrix(rexp(30, 1), 5, 18))) # <<-- PATTERN ! --
                                         
 [1,] 1                                  
 [2,] . 1                                
 [3,]     1                              
 [4,] . .   1                            
 [5,]   , .   1                          
 [6,]   ,     , 1                        
 [7,] 1 .   .     1                      
 [8,] . 1   . , , . 1                    
 [9,]     1   .       1                  
[10,] . .   1     . .   1                
[11,]   , .   1 ,   , .   1              
[12,]   ,     , 1   ,     , 1            
[13,] 1 .   .     1 .   .     1          
[14,] . 1   . , , . 1   . , , . 1        
[15,]     1   .       1   .       1      
[16,] . .   1     . .   1     . .   1    
[17,]   , .   1 ,   , .   1 ,   , .   1  
[18,]   ,     , 1   ,     , 1   ,     , 1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> symnum(cm1 <- cor(matrix(rnorm(90) ,  5, 18))) # < White Noise SMALL n
                                         
 [1,] 1                                  
 [2,] . 1                                
 [3,] . . 1                              
 [4,] , ,   1                            
 [5,]   . ,   1                          
 [6,]   . *   * 1                        
 [7,] , .   . .   1                      
 [8,] , .   .       1                    
 [9,]   .   . . . . . 1                  
[10,] , .   .     . B . 1                
[11,] ,   + . + + ,   .   1              
[12,]   , +   + *   .   . , 1            
[13,] . +   +     ,   +   .   1          
[14,]       .   .   . + ,     , 1        
[15,]   .   .     .   . .     . + 1      
[16,] .   , .   .   . . . .         1    
[17,]   , +   , +       . . * .     . 1  
[18,]   . .   * , . . .   , ,     .   . 1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> symnum(cm1, diag.lower.tri = FALSE)
                                        
 [1,]                                   
 [2,] .                                 
 [3,] . .                               
 [4,] , ,                               
 [5,]   . ,                             
 [6,]   . *   *                         
 [7,] , .   . .                         
 [8,] , .   .                           
 [9,]   .   . . . . .                   
[10,] , .   .     . B .                 
[11,] ,   + . + + ,   .                 
[12,]   , +   + *   .   . ,             
[13,] . +   +     ,   +   .             
[14,]       .   .   . + ,     ,         
[15,]   .   .     .   . .     . +       
[16,] .   , .   .   . . . .             
[17,]   , +   , +       . . * .     .   
[18,]   . .   * , . . .   , ,     .   . 
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> symnum(cm2 <- cor(matrix(rnorm(900), 50, 18))) # < White Noise "BIG" n
                                         
 [1,] 1                                  
 [2,]   1                                
 [3,]     1                              
 [4,]       1                            
 [5,]         1                          
 [6,]           1                        
 [7,]             1                      
 [8,]               1                    
 [9,]                 1                  
[10,]                   1                
[11,]                     1              
[12,]                       1            
[13,]               .     .   1          
[14,]                           1        
[15,]                             1      
[16,]                               1    
[17,]                                 1  
[18,]                                   1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> symnum(cm2, lower.triangular = FALSE)
                                         
 [1,] 1                                  
 [2,]   1                                
 [3,]     1                              
 [4,]       1                            
 [5,]         1                          
 [6,]           1                        
 [7,]             1                      
 [8,]               1         .          
 [9,]                 1                  
[10,]                   1                
[11,]                     1   .          
[12,]                       1            
[13,]               .     .   1          
[14,]                           1        
[15,]                             1      
[16,]                               1    
[17,]                                 1  
[18,]                                   1
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1
> 
> ## NA's:
> Cm <- cor(matrix(rnorm(60),  10, 6)); Cm[c(3,6), 2] <- NA
> symnum(Cm, show.max = NULL)
               
[1,]           
[2,]           
[3,] . ?       
[4,] .   ,     
[5,]     .     
[6,] , ?       
attr(,"legend")
[1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1     ## NA: ‘?’
> 
> ## Graphical P-values (aka "significance stars"):
> pval <- rev(sort(c(outer(1:6, 10^-(1:3)))))
> symp <- symnum(pval, corr = FALSE,
+                cutpoints = c(0,  .001,.01,.05, .1, 1),
+                symbols = c("***","**","*","."," "))
> noquote(cbind(P.val = format(pval), Signif = symp))
      P.val Signif
 [1,] 0.600       
 [2,] 0.500       
 [3,] 0.400       
 [4,] 0.300       
 [5,] 0.200       
 [6,] 0.100 .     
 [7,] 0.060 .     
 [8,] 0.050 *     
 [9,] 0.040 *     
[10,] 0.030 *     
[11,] 0.020 *     
[12,] 0.010 **    
[13,] 0.006 **    
[14,] 0.005 **    
[15,] 0.004 **    
[16,] 0.003 **    
[17,] 0.002 **    
[18,] 0.001 ***   
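> ## (these cutpoints and symbols are the ones printCoefmat() uses for the
> ##  'Signif. codes' legends in the summary() outputs above)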
> 
> 
> 
> cleanEx()
> nameEx("t.test")
> ### * t.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: t.test
> ### Title: Student's t-Test
> ### Aliases: t.test t.test.default t.test.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> ## Two-sample t-test
> t.test(1:10, y = c(7:20))      # P = .00001855

	Welch Two Sample t-test

data:  1:10 and c(7:20)
t = -5.4349, df = 21.982, p-value = 1.855e-05
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -11.052802  -4.947198
sample estimates:
mean of x mean of y 
      5.5      13.5 

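> ## Not run: 
> ##D ## the Welch-Satterthwaite df above can be reproduced by hand (a sketch;
> ##D ## 'v1', 'v2' are ad-hoc names):
> ##D v1 <- var(1:10)/10 ; v2 <- var(7:20)/14
> ##D (v1 + v2)^2 / (v1^2/(10-1) + v2^2/(14-1))  # 21.982, as reported
> ## End(Not run)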
> t.test(1:10, y = c(7:20, 200)) # P = .1245    -- NOT significant anymore

	Welch Two Sample t-test

data:  1:10 and c(7:20, 200)
t = -1.6329, df = 14.165, p-value = 0.1245
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -47.242900   6.376233
sample estimates:
mean of x mean of y 
  5.50000  25.93333 

> 
> ## Traditional interface
> with(mtcars, t.test(mpg[am == 0], mpg[am == 1]))

	Welch Two Sample t-test

data:  mpg[am == 0] and mpg[am == 1]
t = -3.7671, df = 18.332, p-value = 0.001374
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 -11.280194  -3.209684
sample estimates:
mean of x mean of y 
 17.14737  24.39231 

> 
> ## Formula interface
> t.test(mpg ~ am, data = mtcars)

	Welch Two Sample t-test

data:  mpg by am
t = -3.7671, df = 18.332, p-value = 0.001374
alternative hypothesis: true difference in means between group 0 and group 1 is not equal to 0
95 percent confidence interval:
 -11.280194  -3.209684
sample estimates:
mean in group 0 mean in group 1 
       17.14737        24.39231 

> 
> ## One-sample t-test
> ## Traditional interface
> t.test(sleep$extra)

	One Sample t-test

data:  sleep$extra
t = 3.413, df = 19, p-value = 0.002918
alternative hypothesis: true mean is not equal to 0
95 percent confidence interval:
 0.5955845 2.4844155
sample estimates:
mean of x 
     1.54 

> 
> ## Formula interface
> t.test(extra ~ 1, data = sleep)

	One Sample t-test

data:  extra
t = 3.413, df = 19, p-value = 0.002918
alternative hypothesis: true mean is not equal to 0
95 percent confidence interval:
 0.5955845 2.4844155
sample estimates:
mean of x 
     1.54 

> 
> ## Paired t-test
> ## The sleep data is actually paired, so could have been in wide format:
> sleep2 <- reshape(sleep, direction = "wide",
+                   idvar = "ID", timevar = "group")
> 
> ## Traditional interface
> t.test(sleep2$extra.1, sleep2$extra.2, paired = TRUE)

	Paired t-test

data:  sleep2$extra.1 and sleep2$extra.2
t = -4.0621, df = 9, p-value = 0.002833
alternative hypothesis: true mean difference is not equal to 0
95 percent confidence interval:
 -2.4598858 -0.7001142
sample estimates:
mean difference 
          -1.58 

> 
> ## Formula interface
> t.test(Pair(extra.1, extra.2) ~ 1, data = sleep2)

	Paired t-test

data:  Pair(extra.1, extra.2)
t = -4.0621, df = 9, p-value = 0.002833
alternative hypothesis: true mean difference is not equal to 0
95 percent confidence interval:
 -2.4598858 -0.7001142
sample estimates:
mean difference 
          -1.58 

> 
> 
> 
> cleanEx()
> nameEx("termplot")
> ### * termplot
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: termplot
> ### Title: Plot Regression Terms
> ### Aliases: termplot
> ### Keywords: hplot regression
> 
> ### ** Examples
> 
> require(graphics)
> 
> had.splines <- "package:splines" %in% search()
> if(!had.splines) rs <- require(splines)
Loading required package: splines
> x <- 1:100
> z <- factor(rep(LETTERS[1:4], 25))
> y <- rnorm(100, sin(x/10)+as.numeric(z))
> model <- glm(y ~ ns(x, 6) + z)
> 
> par(mfrow = c(2,2)) ## 2 x 2 plots for same model :
> termplot(model, main = paste("termplot( ", deparse(model$call)," ...)"))
> termplot(model, rug = TRUE)
> termplot(model, partial.resid = TRUE, se = TRUE, main = TRUE)
> termplot(model, partial.resid = TRUE, smooth = panel.smooth, span.smth = 1/4)
> if(!had.splines && rs) detach("package:splines")
> 
> if(requireNamespace("MASS", quietly = TRUE)) {
+ hills.lm <- lm(log(time) ~ log(climb)+log(dist), data = MASS::hills)
+ termplot(hills.lm, partial.resid = TRUE, smooth = panel.smooth,
+         terms = "log(dist)", main = "Original")
+ termplot(hills.lm, transform.x = TRUE,
+          partial.resid = TRUE, smooth = panel.smooth,
+ 	 terms = "log(dist)", main = "Transformed")
+ 
+ }
> 
> 
> graphics::par(get("par.postscript", pos = 'CheckExEnv'))
> cleanEx()
> nameEx("terms.object")
> ### * terms.object
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: terms.object
> ### Title: Description of Terms Objects
> ### Aliases: terms.object
> ### Keywords: models
> 
> ### ** Examples
> 
> ## use of specials (as used for gam() in packages mgcv and gam)
> (tf <- terms(y ~ x + x:z + s(x), specials = "s"))
y ~ x + x:z + s(x)
attr(,"variables")
list(y, x, z, s(x))
attr(,"factors")
     x s(x) x:z
y    0    0   0
x    1    0   2
z    0    0   1
s(x) 0    1   0
attr(,"term.labels")
[1] "x"    "s(x)" "x:z" 
attr(,"specials")
attr(,"specials")$s
[1] 4

attr(,"order")
[1] 1 1 2
attr(,"intercept")
[1] 1
attr(,"response")
[1] 1
attr(,".Environment")
<environment: R_GlobalEnv>
> ## Note that the "factors" attribute has variables as row names
> ## and term labels as column names, both as character vectors.
> attr(tf, "specials")    # index 's' variable(s)
$s
[1] 4

> rownames(attr(tf, "factors"))[attr(tf, "specials")$s]
[1] "s(x)"
> 
> ## we can keep the order by
> terms(y ~ x + x:z + s(x), specials = "s", keep.order = TRUE)
y ~ x + x:z + s(x)
attr(,"variables")
list(y, x, z, s(x))
attr(,"factors")
     x x:z s(x)
y    0   0    0
x    1   2    0
z    0   1    0
s(x) 0   0    1
attr(,"term.labels")
[1] "x"    "x:z"  "s(x)"
attr(,"specials")
attr(,"specials")$s
[1] 4

attr(,"order")
[1] 1 2 1
attr(,"intercept")
[1] 1
attr(,"response")
[1] 1
attr(,".Environment")
<environment: R_GlobalEnv>
> 
> 
> 
> cleanEx()
> nameEx("time")
> ### * time
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: time
> ### Title: Sampling Times of Time Series
> ### Aliases: time cycle frequency deltat time.default
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> cycle(presidents)
     Qtr1 Qtr2 Qtr3 Qtr4
1945    1    2    3    4
1946    1    2    3    4
1947    1    2    3    4
1948    1    2    3    4
1949    1    2    3    4
1950    1    2    3    4
1951    1    2    3    4
1952    1    2    3    4
1953    1    2    3    4
1954    1    2    3    4
1955    1    2    3    4
1956    1    2    3    4
1957    1    2    3    4
1958    1    2    3    4
1959    1    2    3    4
1960    1    2    3    4
1961    1    2    3    4
1962    1    2    3    4
1963    1    2    3    4
1964    1    2    3    4
1965    1    2    3    4
1966    1    2    3    4
1967    1    2    3    4
1968    1    2    3    4
1969    1    2    3    4
1970    1    2    3    4
1971    1    2    3    4
1972    1    2    3    4
1973    1    2    3    4
1974    1    2    3    4
> # a simple series plot
> plot(as.vector(time(presidents)), as.vector(presidents), type = "l")
> 
> 
> 
> cleanEx()
> nameEx("toeplitz")
> ### * toeplitz
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: toeplitz
> ### Title: Create Symmetric and Asymmetric Toeplitz Matrix
> ### Aliases: toeplitz toeplitz2
> ### Keywords: ts
> 
> ### ** Examples
> 
> x <- 1:5
> toeplitz (x)
     [,1] [,2] [,3] [,4] [,5]
[1,]    1    2    3    4    5
[2,]    2    1    2    3    4
[3,]    3    2    1    2    3
[4,]    4    3    2    1    2
[5,]    5    4    3    2    1
> 
> T. <- toeplitz (1:5, 11:13) # with a  *Warning* x[1] != r[1]
Warning in toeplitz(1:5, 11:13) : x[1] != r[1]; using x[1] for diagonal
> T2 <- toeplitz2(c(13:12, 1:5), 5, 3)# this is the same matrix:
> stopifnot(identical(T., T2))
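> ## (toeplitz2(x, nrow, ncol) places the length nrow+ncol-1 vector so that
> ##  A[i,j] == x[i-j+ncol], i.e. running from the top-right to the
> ##  bottom-left corner -- consistent with the identity checked above)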
> 
> # A character matrix (logical, raw, complex .. also work); again a warning:
> noquote(toeplitz(letters[1:4], LETTERS[20:26]))
Warning in toeplitz(letters[1:4], LETTERS[20:26]) :
  x[1] != r[1]; using x[1] for diagonal
     [,1] [,2] [,3] [,4] [,5] [,6] [,7]
[1,] a    U    V    W    X    Y    Z   
[2,] b    a    U    V    W    X    Y   
[3,] c    b    a    U    V    W    X   
[4,] d    c    b    a    U    V    W   
> 
> ## A convolution/smoother weight matrix :
> m <- 17
> k <- length(wts <- c(76, 99, 60, 20, 1))
> n <- m-k+1
> ## Convolution
> W <- toeplitz2(c(rep(0, m-k), wts, rep(0, m-k)), ncol=n)
> ## IGNORE_RDIFF_BEGIN
> ## Don't show: 
> hadM <- isNamespaceLoaded("Matrix")
> ## End(Don't show)
> ## "display" nicely :
> if(requireNamespace("Matrix"))
+    print(Matrix::Matrix(W))    else {
+    colnames(W) <- paste0(",", if(n <= 9) 1:n else c(1:9, letters[seq_len(n-9)]))
+    print(W)
+ }
Loading required namespace: Matrix
17 x 13 sparse Matrix of class "dgCMatrix"
                                            
 [1,] 76  .  .  .  .  .  .  .  .  .  .  .  .
 [2,] 99 76  .  .  .  .  .  .  .  .  .  .  .
 [3,] 60 99 76  .  .  .  .  .  .  .  .  .  .
 [4,] 20 60 99 76  .  .  .  .  .  .  .  .  .
 [5,]  1 20 60 99 76  .  .  .  .  .  .  .  .
 [6,]  .  1 20 60 99 76  .  .  .  .  .  .  .
 [7,]  .  .  1 20 60 99 76  .  .  .  .  .  .
 [8,]  .  .  .  1 20 60 99 76  .  .  .  .  .
 [9,]  .  .  .  .  1 20 60 99 76  .  .  .  .
[10,]  .  .  .  .  .  1 20 60 99 76  .  .  .
[11,]  .  .  .  .  .  .  1 20 60 99 76  .  .
[12,]  .  .  .  .  .  .  .  1 20 60 99 76  .
[13,]  .  .  .  .  .  .  .  .  1 20 60 99 76
[14,]  .  .  .  .  .  .  .  .  .  1 20 60 99
[15,]  .  .  .  .  .  .  .  .  .  .  1 20 60
[16,]  .  .  .  .  .  .  .  .  .  .  .  1 20
[17,]  .  .  .  .  .  .  .  .  .  .  .  .  1
> ## Don't show: 
> if(!hadM) unloadNamespace("Matrix")
> ## End(Don't show)
> 
> ## IGNORE_RDIFF_END
> ## scale W to have column sums 1:
> W. <- W / sum(wts)
> all.equal(rep(1, ncol(W.)), colSums(W.), check.attributes = FALSE)
[1] TRUE
> ## Visualize "mass-preserving" convolution
> x <- 1:n; f <- function(x) exp(-((x - .4*n)/3)^2)
> y <- f(x) + rep_len(3:-2, n)/10
> ## Smoothing convolution:
> y.hat <- W. %*% y # y.hat := smoothed(y) ("mass preserving" -> longer than y)
> stopifnot(length(y.hat) == m, m == n + (k-1))
> plot(x,y, type="b", xlim=c(1,m)); curve(f(x), 1,n, col="gray", lty=2, add=TRUE)
> lines(1:m, y.hat, col=2, lwd=3)
> rbind(sum(y), sum(y.hat)) ## mass preserved
         [,1]
[1,] 6.149807
[2,] 6.149807
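> ## (mass preservation is just linearity:
> ##  sum(y.hat) = sum(W. %*% y) = colSums(W.) %*% y = sum(y),
> ##  because every column of W. sums to 1)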
> 
> ## And, yes, convolve(y, *) does the same when called appropriately:
> all.equal(c(y.hat), convolve(y, rev(wts/sum(wts)), type="open"))
[1] TRUE
> 
> 
> 
> cleanEx()
> nameEx("ts")
> ### * ts
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ts
> ### Title: Time-Series Objects
> ### Aliases: ts as.ts as.ts.default is.ts Ops.ts cbind.ts is.mts [.ts t.ts
> ###   head.ts tail.ts
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> ts(1:10, frequency = 4, start = c(1959, 2)) # 2nd Quarter of 1959
     Qtr1 Qtr2 Qtr3 Qtr4
1959         1    2    3
1960    4    5    6    7
1961    8    9   10     
> print( ts(1:10, frequency = 7, start = c(12, 2)), calendar = TRUE)
   p1 p2 p3 p4 p5 p6 p7
12     1  2  3  4  5  6
13  7  8  9 10         
> # print.ts(.)
> ## Using July 1954 as start date:
> gnp <- ts(cumsum(1 + round(rnorm(100), 2)),
+           start = c(1954, 7), frequency = 12)
> plot(gnp) # using 'plot.ts' for time-series plot
> 
> 
> ## "ts" methods for head() and tail()
> utils:: head(lynx, 4)
Time Series:
Start = 1821 
End = 1824 
Frequency = 1 
[1] 269 321 585 871
> utils:: tail(lynx, -7)
Time Series:
Start = 1828 
End = 1934 
Frequency = 1 
  [1] 5943 4950 2577  523   98  184  279  409 2285 2685 3409 1824  409  151   45
 [16]   68  213  546 1033 2129 2536  957  361  377  225  360  731 1638 2725 2871
 [31] 2119  684  299  236  245  552 1623 3311 6721 4254  687  255  473  358  784
 [46] 1594 1676 2251 1426  756  299  201  229  469  736 2042 2811 4431 2511  389
 [61]   73   39   49   59  188  377 1292 4031 3495  587  105  153  387  758 1307
 [76] 3465 6991 6313 3794 1836  345  382  808 1388 2713 3800 3091 2985 3790  674
 [91]   81   80  108  229  399 1132 2432 3574 2935 1537  529  485  662 1000 1590
[106] 2657 3396
> ## Don't show: 
>  stopifnot(is.ts(head(lynx, -6)),
+                      is.ts(tail(lynx, 3)))
> ## End(Don't show)
> 
> ## Multivariate
> z <- ts(matrix(rnorm(300), 100, 3), start = c(1961, 1), frequency = 12)
> class(z)
[1] "mts"    "ts"     "matrix" "array" 
> is.mts(z)
[1] TRUE
> head(z) # "ts" method ==> incl. times:
            Series 1   Series 2   Series 3
Jan 1961 -0.62036668  0.4094018  0.8936737
Feb 1961  0.04211587  1.6888733 -1.0472981
Mar 1961 -0.91092165  1.5865884  1.9713374
Apr 1961  0.15802877 -0.3309078 -0.3836321
May 1961 -0.65458464 -2.2852355  1.6541453
Jun 1961  1.76728727  2.4976616  1.5122127
> plot(z)
> plot(z, plot.type = "single", lty = 1:3)
> 
> ## A phase plot:
> plot(nhtemp, lag(nhtemp, 1), cex = .8, col = "blue",
+      main = "Lag plot of New Haven temperatures")
> 
> 
> 
> cleanEx()
> nameEx("ts.plot")
> ### * ts.plot
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ts.plot
> ### Title: Plot Multiple Time Series
> ### Aliases: ts.plot
> ### Keywords: ts
> 
> ### ** Examples
> 
> require(graphics)
> 
> ts.plot(ldeaths, mdeaths, fdeaths,
+         gpars=list(xlab="year", ylab="deaths", lty=c(1:3)))
> 
> 
> 
> cleanEx()
> nameEx("ts.union")
> ### * ts.union
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: ts.union
> ### Title: Bind Two or More Time Series
> ### Aliases: ts.union ts.intersect
> ### Keywords: ts
> 
> ### ** Examples
> 
> ts.union(mdeaths, fdeaths)
         mdeaths fdeaths
Jan 1974    2134     901
Feb 1974    1863     689
Mar 1974    1877     827
Apr 1974    1877     677
May 1974    1492     522
Jun 1974    1249     406
Jul 1974    1280     441
Aug 1974    1131     393
Sep 1974    1209     387
Oct 1974    1492     582
Nov 1974    1621     578
Dec 1974    1846     666
Jan 1975    2103     830
Feb 1975    2137     752
Mar 1975    2153     785
Apr 1975    1833     664
May 1975    1403     467
Jun 1975    1288     438
Jul 1975    1186     421
Aug 1975    1133     412
Sep 1975    1053     343
Oct 1975    1347     440
Nov 1975    1545     531
Dec 1975    2066     771
Jan 1976    2020     767
Feb 1976    2750    1141
Mar 1976    2283     896
Apr 1976    1479     532
May 1976    1189     447
Jun 1976    1160     420
Jul 1976    1113     376
Aug 1976     970     330
Sep 1976     999     357
Oct 1976    1208     445
Nov 1976    1467     546
Dec 1976    2059     764
Jan 1977    2240     862
Feb 1977    1634     660
Mar 1977    1722     663
Apr 1977    1801     643
May 1977    1246     502
Jun 1977    1162     392
Jul 1977    1087     411
Aug 1977    1013     348
Sep 1977     959     387
Oct 1977    1179     385
Nov 1977    1229     411
Dec 1977    1655     638
Jan 1978    2019     796
Feb 1978    2284     853
Mar 1978    1942     737
Apr 1978    1423     546
May 1978    1340     530
Jun 1978    1187     446
Jul 1978    1098     431
Aug 1978    1004     362
Sep 1978     970     387
Oct 1978    1140     430
Nov 1978    1110     425
Dec 1978    1812     679
Jan 1979    2263     821
Feb 1979    1820     785
Mar 1979    1846     727
Apr 1979    1531     612
May 1979    1215     478
Jun 1979    1075     429
Jul 1979    1056     405
Aug 1979     975     379
Sep 1979     940     393
Oct 1979    1081     411
Nov 1979    1294     487
Dec 1979    1341     574
> cbind(mdeaths, fdeaths) # same as the previous line
         mdeaths fdeaths
Jan 1974    2134     901
Feb 1974    1863     689
Mar 1974    1877     827
Apr 1974    1877     677
May 1974    1492     522
Jun 1974    1249     406
Jul 1974    1280     441
Aug 1974    1131     393
Sep 1974    1209     387
Oct 1974    1492     582
Nov 1974    1621     578
Dec 1974    1846     666
Jan 1975    2103     830
Feb 1975    2137     752
Mar 1975    2153     785
Apr 1975    1833     664
May 1975    1403     467
Jun 1975    1288     438
Jul 1975    1186     421
Aug 1975    1133     412
Sep 1975    1053     343
Oct 1975    1347     440
Nov 1975    1545     531
Dec 1975    2066     771
Jan 1976    2020     767
Feb 1976    2750    1141
Mar 1976    2283     896
Apr 1976    1479     532
May 1976    1189     447
Jun 1976    1160     420
Jul 1976    1113     376
Aug 1976     970     330
Sep 1976     999     357
Oct 1976    1208     445
Nov 1976    1467     546
Dec 1976    2059     764
Jan 1977    2240     862
Feb 1977    1634     660
Mar 1977    1722     663
Apr 1977    1801     643
May 1977    1246     502
Jun 1977    1162     392
Jul 1977    1087     411
Aug 1977    1013     348
Sep 1977     959     387
Oct 1977    1179     385
Nov 1977    1229     411
Dec 1977    1655     638
Jan 1978    2019     796
Feb 1978    2284     853
Mar 1978    1942     737
Apr 1978    1423     546
May 1978    1340     530
Jun 1978    1187     446
Jul 1978    1098     431
Aug 1978    1004     362
Sep 1978     970     387
Oct 1978    1140     430
Nov 1978    1110     425
Dec 1978    1812     679
Jan 1979    2263     821
Feb 1979    1820     785
Mar 1979    1846     727
Apr 1979    1531     612
May 1979    1215     478
Jun 1979    1075     429
Jul 1979    1056     405
Aug 1979     975     379
Sep 1979     940     393
Oct 1979    1081     411
Nov 1979    1294     487
Dec 1979    1341     574
> ts.intersect(window(mdeaths, 1976), window(fdeaths, 1974, 1978))
         window(mdeaths, 1976) window(fdeaths, 1974, 1978)
Jan 1976                  2020                         767
Feb 1976                  2750                        1141
Mar 1976                  2283                         896
Apr 1976                  1479                         532
May 1976                  1189                         447
Jun 1976                  1160                         420
Jul 1976                  1113                         376
Aug 1976                   970                         330
Sep 1976                   999                         357
Oct 1976                  1208                         445
Nov 1976                  1467                         546
Dec 1976                  2059                         764
Jan 1977                  2240                         862
Feb 1977                  1634                         660
Mar 1977                  1722                         663
Apr 1977                  1801                         643
May 1977                  1246                         502
Jun 1977                  1162                         392
Jul 1977                  1087                         411
Aug 1977                  1013                         348
Sep 1977                   959                         387
Oct 1977                  1179                         385
Nov 1977                  1229                         411
Dec 1977                  1655                         638
Jan 1978                  2019                         796
> 
> sales1 <- ts.union(BJsales, lead = BJsales.lead)
> ts.intersect(sales1, lead3 = lag(BJsales.lead, -3))
Time Series:
Start = 4 
End = 150 
Frequency = 1 
    sales1.BJsales sales1.lead lead3
  4          198.9        9.75 10.01
  5          199.0       10.33 10.07
  6          200.2       10.13 10.32
  7          198.6       10.36  9.75
  8          200.0       10.32 10.33
  9          200.3       10.13 10.13
 10          201.2       10.16 10.36
 11          201.6       10.58 10.32
 12          201.5       10.62 10.13
 13          201.5       10.86 10.16
 14          203.5       11.20 10.58
 15          204.9       10.74 10.62
 16          207.1       10.56 10.86
 17          210.5       10.48 11.20
 18          210.5       10.77 10.74
 19          209.8       11.33 10.56
 20          208.8       10.96 10.48
 21          209.5       11.16 10.77
 22          213.2       11.70 11.33
 23          213.7       11.39 10.96
 24          215.1       11.42 11.16
 25          218.7       11.94 11.70
 26          219.8       11.24 11.39
 27          220.5       11.59 11.42
 28          223.8       10.96 11.94
 29          222.8       11.40 11.24
 30          223.8       11.02 11.59
 31          221.7       11.01 10.96
 32          222.3       11.23 11.40
 33          220.8       11.33 11.02
 34          219.4       10.83 11.01
 35          220.1       10.84 11.23
 36          220.6       11.14 11.33
 37          218.9       10.38 10.83
 38          217.8       10.90 10.84
 39          217.7       11.05 11.14
 40          215.0       11.11 10.38
 41          215.3       11.01 10.90
 42          215.9       11.22 11.05
 43          216.7       11.21 11.11
 44          216.7       11.91 11.01
 45          217.7       11.69 11.22
 46          218.7       10.93 11.21
 47          222.9       10.99 11.91
 48          224.9       11.01 11.69
 49          222.2       10.84 10.93
 50          220.7       10.76 10.99
 51          220.0       10.77 11.01
 52          218.7       10.88 10.84
 53          217.0       10.49 10.76
 54          215.9       10.50 10.77
 55          215.8       11.00 10.88
 56          214.1       10.98 10.49
 57          212.3       10.61 10.50
 58          213.9       10.48 11.00
 59          214.6       10.53 10.98
 60          213.6       11.07 10.61
 61          212.1       10.61 10.48
 62          211.4       10.86 10.53
 63          213.1       10.34 11.07
 64          212.9       10.78 10.61
 65          213.3       10.80 10.86
 66          211.5       10.33 10.34
 67          212.3       10.44 10.78
 68          213.0       10.50 10.80
 69          211.0       10.75 10.33
 70          210.7       10.40 10.44
 71          210.1       10.40 10.50
 72          211.4       10.34 10.75
 73          210.0       10.55 10.40
 74          209.7       10.46 10.40
 75          208.8       10.82 10.34
 76          208.8       10.91 10.55
 77          208.8       10.87 10.46
 78          210.6       10.67 10.82
 79          211.9       11.11 10.91
 80          212.8       10.88 10.87
 81          212.5       11.28 10.67
 82          214.8       11.27 11.11
 83          215.3       11.44 10.88
 84          217.5       11.52 11.28
 85          218.8       12.10 11.27
 86          220.7       11.83 11.44
 87          222.2       12.62 11.52
 88          226.7       12.41 12.10
 89          228.4       12.43 11.83
 90          233.2       12.73 12.62
 91          235.7       13.01 12.41
 92          237.1       12.74 12.43
 93          240.6       12.73 12.73
 94          243.8       12.76 13.01
 95          245.3       12.92 12.74
 96          246.0       12.64 12.73
 97          246.3       12.79 12.76
 98          247.7       13.05 12.92
 99          247.6       12.69 12.64
100          247.8       13.01 12.79
101          249.4       12.90 13.05
102          249.0       13.12 12.69
103          249.9       12.47 13.01
104          250.5       12.47 12.90
105          251.5       12.94 13.12
106          249.0       13.10 12.47
107          247.6       12.91 12.47
108          248.8       13.39 12.94
109          250.4       13.13 13.10
110          250.7       13.34 12.91
111          253.0       13.34 13.39
112          253.7       13.14 13.13
113          255.0       13.49 13.34
114          256.2       13.87 13.34
115          256.0       13.39 13.14
116          257.4       13.59 13.49
117          260.4       13.27 13.87
118          260.0       13.70 13.39
119          261.3       13.20 13.59
120          260.4       13.32 13.27
121          261.6       13.15 13.70
122          260.8       13.30 13.20
123          259.8       12.94 13.32
124          259.0       13.29 13.15
125          258.9       13.26 13.30
126          257.4       13.08 12.94
127          257.7       13.24 13.29
128          257.9       13.31 13.26
129          257.4       13.52 13.08
130          257.3       13.02 13.24
131          257.6       13.25 13.31
132          258.9       13.12 13.52
133          257.8       13.26 13.02
134          257.7       13.11 13.25
135          257.2       13.30 13.12
136          257.5       13.06 13.26
137          256.8       13.32 13.11
138          257.5       13.10 13.30
139          257.0       13.27 13.06
140          257.6       13.64 13.32
141          257.3       13.58 13.10
142          257.5       13.87 13.27
143          259.6       13.53 13.64
144          261.1       13.41 13.58
145          262.9       13.25 13.87
146          263.3       13.50 13.53
147          262.8       13.58 13.41
148          261.8       13.51 13.25
149          262.2       13.77 13.50
150          262.7       13.40 13.58
> 
> 
> 
> cleanEx()
> nameEx("tsdiag")
> ### * tsdiag
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: tsdiag
> ### Title: Diagnostic Plots for Time-Series Fits
> ### Aliases: tsdiag tsdiag.arima0 tsdiag.Arima tsdiag.StructTS
> ### Keywords: ts
> 
> ### ** Examples
> 
> 
> 
> cleanEx()
> nameEx("uniroot")
> ### * uniroot
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: uniroot
> ### Title: One Dimensional Root (Zero) Finding
> ### Aliases: uniroot
> ### Keywords: optimize
> 
> ### ** Examples
> 
> ##--- uniroot() with new interval extension + checking features: --------------
> 
> f1 <- function(x) (121 - x^2)/(x^2+1)
> f2 <- function(x) exp(-x)*(x - 12)
> 
> try(uniroot(f1, c(0,10)))
Error in uniroot(f1, c(0, 10)) : 
  f() values at end points not of opposite sign
> try(uniroot(f2, c(0, 2)))
Error in uniroot(f2, c(0, 2)) : 
  f() values at end points not of opposite sign
> ##--> error: f() .. end points not of opposite sign
> 
> ## whereas 'extendInt="yes"' simply first enlarges the search interval:
> u1 <- uniroot(f1, c(0,10),extendInt="yes", trace=1)
search in [0,10] ... extended to [-1.5e-05, 11.5] in 4 steps
> u2 <- uniroot(f2, c(0,2), extendInt="yes", trace=2)
search in [0,2]
 .. modified lower,upper: (         -1e-06,           2.02)
 .. modified lower,upper: (         -3e-06,           2.06)
 .. modified lower,upper: (         -7e-06,           2.14)
 .. modified lower,upper: (       -1.5e-05,            2.3)
 .. modified lower,upper: (       -3.1e-05,           2.62)
 .. modified lower,upper: (       -6.3e-05,           3.26)
 .. modified lower,upper: (      -0.000127,           4.54)
 .. modified lower,upper: (      -0.000255,            7.1)
 .. modified lower,upper: (      -0.000511,          12.22)
> stopifnot(all.equal(u1$root, 11, tolerance = 1e-5),
+           all.equal(u2$root, 12, tolerance = 6e-6))
> 
> ## The *danger* of interval extension:
> ## No way to find a zero of a positive function, but
> ## numerically, f(-|M|) becomes zero :
> u3 <- uniroot(exp, c(0,2), extendInt="yes", trace=TRUE)
search in [0,2] ... extended to [-1073.74, 2.14748e+07] in 30 steps
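> ## (exp(x) underflows to exactly 0 in double precision once
> ##  x < log(2^-1074) ~ -745.1, so f(-1073.74) == 0 numerically,
> ##  and that spurious "root" is what was found)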
> 
> ## Nonsense example (must give an error):
> tools::assertCondition( uniroot(function(x) 1, 0:1, extendInt="yes"),
+                        "error", verbose=TRUE)
assertCondition: caught “error”
> 
> ## Convergence checking :
> sinc <- function(x) ifelse(x == 0, 1, sin(x)/x)
> curve(sinc, -6,18); abline(h=0,v=0, lty=3, col=adjustcolor("gray", 0.8))
> ## Don't show: 
> tools::assertWarning(
+ ## End(Don't show)
+ uniroot(sinc, c(0,5), extendInt="yes", maxiter=4) #-> "just" a warning
+ ## Don't show: 
+  , verbose=TRUE)
Asserted warning: _NOT_ converged in 4 iterations
> ## End(Don't show)
> 
> ## now with  check.conv=TRUE, must signal a convergence error :
> ## Don't show: 
> tools::assertError(
+ ## End(Don't show)
+ uniroot(sinc, c(0,5), extendInt="yes", maxiter=4, check.conv=TRUE)
+ ## Don't show: 
+  , verbose=TRUE)
Asserted error: _NOT_ converged in 4 iterations
> ## End(Don't show)
> 
> ### Weibull cumulative hazard (example origin, Ravi Varadhan):
> cumhaz <- function(t, a, b) b * (t/b)^a
> froot <- function(x, u, a, b) cumhaz(x, a, b) - u
> 
> n <- 1000
> u <- -log(runif(n))
> a <- 1/2
> b <- 1
> ## Find failure times
> ru <- sapply(u, function(x)
+    uniroot(froot, u=x, a=a, b=b, interval= c(1.e-14, 1e04),
+            extendInt="yes")$root)
> ru2 <- sapply(u, function(x)
+    uniroot(froot, u=x, a=a, b=b, interval= c(0.01,  10),
+            extendInt="yes")$root)
> stopifnot(all.equal(ru, ru2, tolerance = 6e-6))
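> ## (for comparison there is a closed form:  b*(t/b)^a = u  <=>
> ##  t = b*(u/b)^(1/a),  i.e.  t = u^2  for a = 1/2, b = 1,
> ##  which ru and ru2 approximate)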
> 
> r1 <- uniroot(froot, u= 0.99, a=a, b=b, interval= c(0.01, 10),
+              extendInt="up")
> stopifnot(all.equal(0.99, cumhaz(r1$root, a=a, b=b)))
> 
> ## An error if 'extendInt' assumes "wrong zero-crossing direction":
> ## Don't show: 
> tools::assertError(
+ ## End(Don't show)
+ uniroot(froot, u= 0.99, a=a, b=b, interval= c(0.1, 10), extendInt="down")
+ ## Don't show: 
+  , verbose=TRUE)
Asserted error: no sign change found in 1000 iterations
> ## End(Don't show)
> 
> 
> 
> cleanEx()
> nameEx("update")
> ### * update
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: update
> ### Title: Update and Re-fit a Model Call
> ### Aliases: update update.default getCall getCall.default
> ### Keywords: models
> 
> ### ** Examples
> 
> oldcon <- options(contrasts = c("contr.treatment", "contr.poly"))
> ## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
> ## Page 9: Plant Weight Data.
> ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
> trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
> group <- gl(2, 10, 20, labels = c("Ctl", "Trt"))
> weight <- c(ctl, trt)
> lm.D9 <- lm(weight ~ group)
> lm.D9

Call:
lm(formula = weight ~ group)

Coefficients:
(Intercept)     groupTrt  
      5.032       -0.371  

> summary(lm.D90 <- update(lm.D9, . ~ . - 1))

Call:
lm(formula = weight ~ group - 1)

Residuals:
    Min      1Q  Median      3Q     Max 
-1.0710 -0.4938  0.0685  0.2462  1.3690 

Coefficients:
         Estimate Std. Error t value Pr(>|t|)    
groupCtl   5.0320     0.2202   22.85 9.55e-15 ***
groupTrt   4.6610     0.2202   21.16 3.62e-14 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 0.6964 on 18 degrees of freedom
Multiple R-squared:  0.9818,	Adjusted R-squared:  0.9798 
F-statistic: 485.1 on 2 and 18 DF,  p-value: < 2.2e-16

> options(contrasts = c("contr.helmert", "contr.poly"))
> update(lm.D9)

Call:
lm(formula = weight ~ group)

Coefficients:
(Intercept)       group1  
     4.8465      -0.1855  

> getCall(lm.D90)  # "through the origin"
lm(formula = weight ~ group - 1)
> 
> options(oldcon)
> 
> 
> 
> base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly"))
> cleanEx()
> nameEx("update.formula")
> ### * update.formula
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: update.formula
> ### Title: Model Updating
> ### Aliases: update.formula
> ### Keywords: models
> 
> ### ** Examples
> 
> update(y ~ x,    ~ . + x2) #> y ~ x + x2
y ~ x + x2
> update(y ~ x, log(.) ~ . ) #> log(y) ~ x
log(y) ~ x
> update(. ~ u+v, res  ~ . ) #> res ~ u + v
res ~ u + v
> 
> 
> 
> cleanEx()
> nameEx("var.test")
> ### * var.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: var.test
> ### Title: F Test to Compare Two Variances
> ### Aliases: var.test var.test.default var.test.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> x <- rnorm(50, mean = 0, sd = 2)
> y <- rnorm(30, mean = 1, sd = 1)
> var.test(x, y)                  # Do x and y have the same variance?

	F test to compare two variances

data:  x and y
F = 2.6522, num df = 49, denom df = 29, p-value = 0.006232
alternative hypothesis: true ratio of variances is not equal to 1
95 percent confidence interval:
 1.332510 4.989832
sample estimates:
ratio of variances 
          2.652168 

> var.test(lm(x ~ 1), lm(y ~ 1))  # The same.

	F test to compare two variances

data:  lm(x ~ 1) and lm(y ~ 1)
F = 2.6522, num df = 49, denom df = 29, p-value = 0.006232
alternative hypothesis: true ratio of variances is not equal to 1
95 percent confidence interval:
 1.332510 4.989832
sample estimates:
ratio of variances 
          2.652168 

> 
> 
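## Editor's sketch (not in the original transcript): with the default
## ratio = 1, the F statistic is just the ratio of the sample variances.
vt <- var.test(x, y)
stopifnot(all.equal(unname(vt$statistic), var(x) / var(y)))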
> 
> cleanEx()
> nameEx("varimax")
> ### * varimax
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: varimax
> ### Title: Rotation Methods for Factor Analysis
> ### Aliases: promax varimax
> ### Keywords: multivariate
> 
> ### ** Examples
> 
> ## varimax with normalize = TRUE is the default
> fa <- factanal( ~., 2, data = swiss)
> varimax(loadings(fa), normalize = FALSE)
$loadings

Loadings:
                 Factor1 Factor2
Fertility        -0.650   0.398 
Agriculture      -0.628   0.337 
Examination       0.681  -0.515 
Education         0.997         
Catholic         -0.117   0.962 
Infant.Mortality          0.176 

               Factor1 Factor2
SS loadings      2.297   1.496
Proportion Var   0.383   0.249
Cumulative Var   0.383   0.632

$rotmat
            [,1]         [,2]
[1,] 0.999973881 -0.007227595
[2,] 0.007227595  0.999973881

> promax(loadings(fa))
$loadings

Loadings:
                 Factor1 Factor2
Fertility        -0.595   0.227 
Agriculture      -0.599   0.160 
Examination       0.577  -0.360 
Education         1.192   0.363 
Catholic          0.326   1.147 
Infant.Mortality          0.180 

               Factor1 Factor2
SS loadings      2.574   1.686
Proportion Var   0.429   0.281
Cumulative Var   0.429   0.710

$rotmat
          [,1]      [,2]
[1,] 1.2114045 0.4029296
[2,] 0.4956199 1.2453063

> 
> 
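## Editor's sketch (not in the original transcript): the rotated
## loadings should equal the original loadings times the returned
## (orthogonal) rotation matrix.
vm <- varimax(loadings(fa), normalize = FALSE)
stopifnot(all.equal(unclass(vm$loadings),
                    unclass(loadings(fa)) %*% vm$rotmat,
                    check.attributes = FALSE))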
> 
> cleanEx()
> nameEx("weighted.mean")
> ### * weighted.mean
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: weighted.mean
> ### Title: Weighted Arithmetic Mean
> ### Aliases: weighted.mean weighted.mean.default
> ### Keywords: univar
> 
> ### ** Examples
> 
> ## GPA from Siegel 1994
> wt <- c(5,  5,  4,  1)/15
> x <- c(3.7,3.3,3.5,2.8)
> xm <- weighted.mean(x, wt)
> 
> 
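## Editor's sketch (not in the original transcript): the weighted mean
## is sum(x * wt) / sum(wt), here (5*3.7 + 5*3.3 + 4*3.5 + 1*2.8) / 15.
stopifnot(all.equal(xm, sum(x * wt) / sum(wt)))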
> 
> cleanEx()
> nameEx("weighted.residuals")
> ### * weighted.residuals
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: weighted.residuals
> ### Title: Compute Weighted Residuals
> ### Aliases: weighted.residuals
> ### Keywords: regression
> 
> ### ** Examples
> 
> ## following on from example(lm)
> ## Don't show: 
> utils::example("lm", echo = FALSE)
> ## End(Don't show)
> all.equal(weighted.residuals(lm.D9),
+           residuals(lm.D9))
[1] TRUE
> x <- 1:10
> w <- 0:9
> y <- rnorm(x)
> weighted.residuals(lmxy <- lm(y ~ x, weights = w))
          2           3           4           5           6           7 
-0.15744267 -1.63534445  2.24282864  0.09895229 -2.41528559  0.60771924 
          8           9          10 
 1.37406419  1.06675699 -1.45098819 
> weighted.residuals(lmxy, drop0 = FALSE)
          1           2           3           4           5           6 
 0.00000000 -0.15744267 -1.63534445  2.24282864  0.09895229 -2.41528559 
          7           8           9          10 
 0.60771924  1.37406419  1.06675699 -1.45098819 
> 
> 
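## Editor's sketch (not in the original transcript): for a weighted lm
## fit, the weighted residuals are the ordinary residuals scaled by the
## square root of the weights (hence the exact zero at w = 0 above).
stopifnot(all.equal(weighted.residuals(lmxy, drop0 = FALSE),
                    residuals(lmxy) * sqrt(w)))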
> 
> cleanEx()
> nameEx("wilcox.test")
> ### * wilcox.test
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: wilcox.test
> ### Title: Wilcoxon Rank Sum and Signed Rank Tests
> ### Aliases: wilcox.test wilcox.test.default wilcox.test.formula
> ### Keywords: htest
> 
> ### ** Examples
> 
> require(graphics)
> ## One-sample test.
> ## Hollander & Wolfe (1973), 29f.
> ## Hamilton depression scale factor measurements in 9 patients with
> ##  mixed anxiety and depression, taken at the first (x) and second
> ##  (y) visit after initiation of a therapy (administration of a
> ##  tranquilizer).
> x <- c(1.83,  0.50,  1.62,  2.48, 1.68, 1.88, 1.55, 3.06, 1.30)
> y <- c(0.878, 0.647, 0.598, 2.05, 1.06, 1.29, 1.06, 3.14, 1.29)
> wilcox.test(x, y, paired = TRUE, alternative = "greater")

	Wilcoxon signed rank exact test

data:  x and y
V = 40, p-value = 0.01953
alternative hypothesis: true location shift is greater than 0

> wilcox.test(y - x, alternative = "less")    # The same.

	Wilcoxon signed rank exact test

data:  y - x
V = 5, p-value = 0.01953
alternative hypothesis: true location is less than 0

> wilcox.test(y - x, alternative = "less",
+             exact = FALSE, correct = FALSE) # H&W large sample

	Wilcoxon signed rank test

data:  y - x
V = 5, p-value = 0.01908
alternative hypothesis: true location is less than 0

>                                             # approximation
> 
> ## Formula interface to one-sample and paired tests
> 
> depression <- data.frame(first = x, second = y, change = y - x)
> wilcox.test(change ~ 1, data = depression)

	Wilcoxon signed rank exact test

data:  change
V = 5, p-value = 0.03906
alternative hypothesis: true location is not equal to 0

> wilcox.test(Pair(first, second) ~ 1, data = depression)

	Wilcoxon signed rank exact test

data:  Pair(first, second)
V = 40, p-value = 0.03906
alternative hypothesis: true location shift is not equal to 0

> 
> ## Two-sample test.
> ## Hollander & Wolfe (1973), 69f.
> ## Permeability constants of the human chorioamnion (a placental
> ##  membrane) at term (x) and between 12 and 26 weeks gestational
> ##  age (y).  The alternative of interest is greater permeability
> ##  of the human chorioamnion for the term pregnancy.
> x <- c(0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
> y <- c(1.15, 0.88, 0.90, 0.74, 1.21)
> wilcox.test(x, y, alternative = "g")        # greater

	Wilcoxon rank sum exact test

data:  x and y
W = 35, p-value = 0.1272
alternative hypothesis: true location shift is greater than 0

> wilcox.test(x, y, alternative = "greater",
+             exact = FALSE, correct = FALSE) # H&W large sample

	Wilcoxon rank sum test

data:  x and y
W = 35, p-value = 0.1103
alternative hypothesis: true location shift is greater than 0

>                                             # approximation
> 
> wilcox.test(rnorm(10), rnorm(10, 2), conf.int = TRUE)

	Wilcoxon rank sum exact test

data:  rnorm(10) and rnorm(10, 2)
W = 7, p-value = 0.0004871
alternative hypothesis: true location shift is not equal to 0
95 percent confidence interval:
 -3.024352 -1.348555
sample estimates:
difference in location 
             -2.262424 

> 
> ## Formula interface.
> boxplot(Ozone ~ Month, data = airquality)
> wilcox.test(Ozone ~ Month, data = airquality,
+             subset = Month %in% c(5, 8))
Warning in wilcox.test.default(x = DATA[[1L]], y = DATA[[2L]], ...) :
  cannot compute exact p-value with ties

	Wilcoxon rank sum test with continuity correction

data:  Ozone by Month
W = 127.5, p-value = 0.0001208
alternative hypothesis: true location shift is not equal to 0

> 
> ## accuracy in ties determination via 'digits.rank':
> wilcox.test( 4:2,      3:1,     paired=TRUE) # Warning:  cannot compute exact p-value with ties
Warning in wilcox.test.default(4:2, 3:1, paired = TRUE) :
  cannot compute exact p-value with ties

	Wilcoxon signed rank test with continuity correction

data:  4:2 and 3:1
V = 6, p-value = 0.1489
alternative hypothesis: true location shift is not equal to 0

> wilcox.test((4:2)/10, (3:1)/10, paired=TRUE) # no ties => *no* warning

	Wilcoxon signed rank exact test

data:  (4:2)/10 and (3:1)/10
V = 6, p-value = 0.25
alternative hypothesis: true location shift is not equal to 0

> wilcox.test((4:2)/10, (3:1)/10, paired=TRUE, digits.rank = 9) # same ties as (4:2, 3:1)
Warning in wilcox.test.default((4:2)/10, (3:1)/10, paired = TRUE, digits.rank = 9) :
  cannot compute exact p-value with ties

	Wilcoxon signed rank test with continuity correction

data:  (4:2)/10 and (3:1)/10
V = 6, p-value = 0.1489
alternative hypothesis: true location shift is not equal to 0

> 
> 
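## Editor's sketch (not in the original transcript): for the paired
## depression data, the signed rank statistic V is the sum of the ranks
## of |d| over the positive differences d, matching the V = 40 above.
d <- with(depression, first - second)
stopifnot(sum(rank(abs(d))[d > 0]) == 40)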
> 
> cleanEx()
> nameEx("window")
> ### * window
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: window
> ### Title: Time (Series) Windows
> ### Aliases: window window.default window.ts window<- window<-.ts
> ### Keywords: ts
> 
> ### ** Examples
> 
> window(presidents, 1960, c(1969,4)) # values in the 1960's
     Qtr1 Qtr2 Qtr3 Qtr4
1960   71   62   61   57
1961   72   83   71   78
1962   79   71   62   74
1963   76   64   62   57
1964   80   73   69   69
1965   71   64   69   62
1966   63   46   56   44
1967   44   52   38   46
1968   36   49   35   44
1969   59   65   65   56
> window(presidents, deltat = 1)  # All Qtr1s
Time Series:
Start = 1945 
End = 1974 
Frequency = 1 
 [1] NA 63 35 36 69 45 36 25 59 71 71 76 79 60 57 71 72 79 76 80 71 63 44 36 59
[26] 66 51 49 68 28
> window(presidents, start = c(1945,3), deltat = 1)  # All Qtr3s
Time Series:
Start = 1945.5 
End = 1974.5 
Frequency = 1 
 [1] 82 43 54 NA 57 46 32 NA 75 71 79 67 63 48 61 61 71 62 62 69 69 56 38 35 65
[26] 61 54 NA 40 24
> window(presidents, 1944, c(1979,2), extend = TRUE)
     Qtr1 Qtr2 Qtr3 Qtr4
1944   NA   NA   NA   NA
1945   NA   87   82   75
1946   63   50   43   32
1947   35   60   54   55
1948   36   39   NA   NA
1949   69   57   57   51
1950   45   37   46   39
1951   36   24   32   23
1952   25   32   NA   32
1953   59   74   75   60
1954   71   61   71   57
1955   71   68   79   73
1956   76   71   67   75
1957   79   62   63   57
1958   60   49   48   52
1959   57   62   61   66
1960   71   62   61   57
1961   72   83   71   78
1962   79   71   62   74
1963   76   64   62   57
1964   80   73   69   69
1965   71   64   69   62
1966   63   46   56   44
1967   44   52   38   46
1968   36   49   35   44
1969   59   65   65   56
1970   66   53   61   52
1971   51   48   54   49
1972   49   61   NA   NA
1973   68   44   40   27
1974   28   25   24   24
1975   NA   NA   NA   NA
1976   NA   NA   NA   NA
1977   NA   NA   NA   NA
1978   NA   NA   NA   NA
1979   NA   NA          
> 
> pres <- window(presidents, 1945, c(1949,4)) # values in the 1940's
> window(pres, 1945.25, 1945.50) <- c(60, 70)
> window(pres, 1944, 1944.75) <- 0 # will generate a warning
Warning: extending time series when replacing values
> window(pres, c(1945,4), c(1949,4), frequency = 1) <- 85:89
> pres
     Qtr1 Qtr2 Qtr3 Qtr4
1944    0    0    0    0
1945   NA   60   70   85
1946   63   50   43   86
1947   35   60   54   87
1948   36   39   NA   88
1949   69   57   57   89
> 
> 
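## Editor's sketch (not in the original transcript): window() is
## equivalent to subsetting the series by time(); here we recover the
## 1960s values selected at the top of this example.
tt <- time(presidents)
stopifnot(all.equal(as.vector(window(presidents, 1960, c(1969, 4))),
                    as.vector(presidents[tt >= 1960 & tt < 1970])))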
> 
> cleanEx()
> nameEx("xtabs")
> ### * xtabs
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: xtabs
> ### Title: Cross Tabulation
> ### Aliases: xtabs print.xtabs
> ### Keywords: category
> 
> ### ** Examples
> 
> ## 'esoph' has the frequencies of cases and controls for all levels of
> ## the variables 'agegp', 'alcgp', and 'tobgp'.
> xtabs(cbind(ncases, ncontrols) ~ ., data = esoph)
, , tobgp = 0-9g/day,  = ncases

       alcgp
agegp   0-39g/day 40-79 80-119 120+
  25-34         0     0      0    0
  35-44         0     0      0    2
  45-54         1     6      3    4
  55-64         2     9      9    5
  65-74         5    17      6    3
  75+           1     2      1    2

, , tobgp = 10-19,  = ncases

       alcgp
agegp   0-39g/day 40-79 80-119 120+
  25-34         0     0      0    1
  35-44         1     3      0    0
  45-54         0     4      6    3
  55-64         3     6      8    6
  65-74         4     3      4    1
  75+           2     1      1    1

, , tobgp = 20-29,  = ncases

       alcgp
agegp   0-39g/day 40-79 80-119 120+
  25-34         0     0      0    0
  35-44         0     1      0    2
  45-54         0     5      1    2
  55-64         3     4      3    2
  65-74         2     5      2    1
  75+           0     0      0    0

, , tobgp = 30+,  = ncases

       alcgp
agegp   0-39g/day 40-79 80-119 120+
  25-34         0     0      0    0
  35-44         0     0      0    0
  45-54         0     5      2    4
  55-64         4     3      4    5
  65-74         0     0      1    1
  75+           1     1      0    0

, , tobgp = 0-9g/day,  = ncontrols

       alcgp
agegp   0-39g/day 40-79 80-119 120+
  25-34        40    27      2    1
  35-44        60    35     11    1
  45-54        45    32     13    0
  55-64        47    31      9    5
  65-74        43    17      7    1
  75+          17     3      0    0

, , tobgp = 10-19,  = ncontrols

       alcgp
agegp   0-39g/day 40-79 80-119 120+
  25-34        10     7      1    0
  35-44        13    20      6    3
  45-54        18    17      8    1
  55-64        19    15      7    1
  65-74        10     7      8    1
  75+           4     2      0    0

, , tobgp = 20-29,  = ncontrols

       alcgp
agegp   0-39g/day 40-79 80-119 120+
  25-34         6     4      0    1
  35-44         7    13      2    2
  45-54        10    10      4    1
  55-64         9    13      3    1
  65-74         5     4      1    0
  75+           0     3      0    0

, , tobgp = 30+,  = ncontrols

       alcgp
agegp   0-39g/day 40-79 80-119 120+
  25-34         5     7      2    2
  35-44         8     8      1    0
  45-54         4     2      2    0
  55-64         2     3      0    1
  65-74         2     0      0    0
  75+           2     0      0    0

> ## Output is not really helpful ... flat tables are better:
> ftable(xtabs(cbind(ncases, ncontrols) ~ ., data = esoph))
                          ncases ncontrols
agegp alcgp     tobgp                     
25-34 0-39g/day 0-9g/day       0        40
                10-19          0        10
                20-29          0         6
                30+            0         5
      40-79     0-9g/day       0        27
                10-19          0         7
                20-29          0         4
                30+            0         7
      80-119    0-9g/day       0         2
                10-19          0         1
                20-29          0         0
                30+            0         2
      120+      0-9g/day       0         1
                10-19          1         0
                20-29          0         1
                30+            0         2
35-44 0-39g/day 0-9g/day       0        60
                10-19          1        13
                20-29          0         7
                30+            0         8
      40-79     0-9g/day       0        35
                10-19          3        20
                20-29          1        13
                30+            0         8
      80-119    0-9g/day       0        11
                10-19          0         6
                20-29          0         2
                30+            0         1
      120+      0-9g/day       2         1
                10-19          0         3
                20-29          2         2
                30+            0         0
45-54 0-39g/day 0-9g/day       1        45
                10-19          0        18
                20-29          0        10
                30+            0         4
      40-79     0-9g/day       6        32
                10-19          4        17
                20-29          5        10
                30+            5         2
      80-119    0-9g/day       3        13
                10-19          6         8
                20-29          1         4
                30+            2         2
      120+      0-9g/day       4         0
                10-19          3         1
                20-29          2         1
                30+            4         0
55-64 0-39g/day 0-9g/day       2        47
                10-19          3        19
                20-29          3         9
                30+            4         2
      40-79     0-9g/day       9        31
                10-19          6        15
                20-29          4        13
                30+            3         3
      80-119    0-9g/day       9         9
                10-19          8         7
                20-29          3         3
                30+            4         0
      120+      0-9g/day       5         5
                10-19          6         1
                20-29          2         1
                30+            5         1
65-74 0-39g/day 0-9g/day       5        43
                10-19          4        10
                20-29          2         5
                30+            0         2
      40-79     0-9g/day      17        17
                10-19          3         7
                20-29          5         4
                30+            0         0
      80-119    0-9g/day       6         7
                10-19          4         8
                20-29          2         1
                30+            1         0
      120+      0-9g/day       3         1
                10-19          1         1
                20-29          1         0
                30+            1         0
75+   0-39g/day 0-9g/day       1        17
                10-19          2         4
                20-29          0         0
                30+            1         2
      40-79     0-9g/day       2         3
                10-19          1         2
                20-29          0         3
                30+            1         0
      80-119    0-9g/day       1         0
                10-19          1         0
                20-29          0         0
                30+            0         0
      120+      0-9g/day       2         0
                10-19          1         0
                20-29          0         0
                30+            0         0
> ## In particular if we have fewer factors ...
> ftable(xtabs(cbind(ncases, ncontrols) ~ agegp, data = esoph))
       ncases ncontrols
agegp                  
25-34       1       115
35-44       9       190
45-54      46       167
55-64      76       166
65-74      55       106
75+        13        31
> 
> ## This is already a contingency table in array form.
> DF <- as.data.frame(UCBAdmissions)
> ## Now 'DF' is a data frame with a grid of the factors and the counts
> ## in variable 'Freq'.
> DF
      Admit Gender Dept Freq
1  Admitted   Male    A  512
2  Rejected   Male    A  313
3  Admitted Female    A   89
4  Rejected Female    A   19
5  Admitted   Male    B  353
6  Rejected   Male    B  207
7  Admitted Female    B   17
8  Rejected Female    B    8
9  Admitted   Male    C  120
10 Rejected   Male    C  205
11 Admitted Female    C  202
12 Rejected Female    C  391
13 Admitted   Male    D  138
14 Rejected   Male    D  279
15 Admitted Female    D  131
16 Rejected Female    D  244
17 Admitted   Male    E   53
18 Rejected   Male    E  138
19 Admitted Female    E   94
20 Rejected Female    E  299
21 Admitted   Male    F   22
22 Rejected   Male    F  351
23 Admitted Female    F   24
24 Rejected Female    F  317
> ## Nice for taking margins ...
> xtabs(Freq ~ Gender + Admit, DF)
        Admit
Gender   Admitted Rejected
  Male       1198     1493
  Female      557     1278
> ## And for testing independence ...
> summary(xtabs(Freq ~ ., DF))
Call: xtabs(formula = Freq ~ ., data = DF)
Number of cases in table: 4526 
Number of factors: 3 
Test for independence of all factors:
	Chisq = 2000.3, df = 16, p-value = 0
> 
> ## with NA's
> DN <- DF; DN[cbind(6:9, c(1:2,4,1))] <- NA
> DN # 'Freq' is missing only for (Rejected, Female, B)
      Admit Gender Dept Freq
1  Admitted   Male    A  512
2  Rejected   Male    A  313
3  Admitted Female    A   89
4  Rejected Female    A   19
5  Admitted   Male    B  353
6      <NA>   Male    B  207
7  Admitted   <NA>    B   17
8  Rejected Female    B   NA
9      <NA>   Male    C  120
10 Rejected   Male    C  205
11 Admitted Female    C  202
12 Rejected Female    C  391
13 Admitted   Male    D  138
14 Rejected   Male    D  279
15 Admitted Female    D  131
16 Rejected Female    D  244
17 Admitted   Male    E   53
18 Rejected   Male    E  138
19 Admitted Female    E   94
20 Rejected Female    E  299
21 Admitted   Male    F   22
22 Rejected   Male    F  351
23 Admitted Female    F   24
24 Rejected Female    F  317
> (xtNA <- xtabs(Freq ~ Gender + Admit, DN))     # NA prints 'invisibly'
        Admit
Gender   Admitted Rejected
  Male       1078     1286
  Female      540         
> print(xtNA, na.print = "NA")                   # show NA's better
        Admit
Gender   Admitted Rejected
  Male       1078     1286
  Female      540       NA
> xtabs(Freq ~ Gender + Admit, DN, na.rm = TRUE) # ignore missing Freq
        Admit
Gender   Admitted Rejected
  Male       1078     1286
  Female      540     1270
> ## Use addNA = TRUE to tabulate missing factor levels:
> xtabs(Freq ~ Gender + Admit, DN, addNA = TRUE)
        Admit
Gender   Admitted Rejected <NA>
  Male       1078     1286  327
  Female      540             0
  <NA>         17        0    0
> xtabs(Freq ~ Gender + Admit, DN, addNA = TRUE, na.rm = TRUE)
        Admit
Gender   Admitted Rejected <NA>
  Male       1078     1286  327
  Female      540     1270    0
  <NA>         17        0    0
> ## na.action = na.omit removes all rows with NAs right from the start:
> xtabs(Freq ~ Gender + Admit, DN, na.action = na.omit)
        Admit
Gender   Admitted Rejected
  Male       1078     1286
  Female      540     1270
> 
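## Editor's sketch (not in the original transcript): each cell of an
## xtabs() result is a sum of 'Freq' over the matching factor levels.
tab <- xtabs(Freq ~ Gender + Admit, DF)
stopifnot(tab["Male", "Admitted"] ==
          sum(DF$Freq[DF$Gender == "Male" & DF$Admit == "Admitted"]))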
> ## Create a nice display for the warp break data.
> warpbreaks$replicate <- rep_len(1:9, 54)
> ftable(xtabs(breaks ~ wool + tension + replicate, data = warpbreaks))
             replicate  1  2  3  4  5  6  7  8  9
wool tension                                     
A    L                 26 30 54 25 70 52 51 26 67
     M                 18 21 29 17 12 18 35 30 36
     H                 36 21 24 18 10 43 28 15 26
B    L                 27 14 29 19 29 31 41 20 44
     M                 42 26 19 16 39 28 21 39 29
     H                 20 21 24 17 13 15 15 16 28
> 
> ### ---- Sparse Examples ----
> 
> 
> 
> 
> cleanEx()
> nameEx("zC")
> ### * zC
> 
> flush(stderr()); flush(stdout())
> 
> ### Name: C
> ### Title: Sets Contrasts for a Factor
> ### Aliases: C
> ### Keywords: models
> 
> ### ** Examples
> 
> ## reset contrasts to defaults
> options(contrasts = c("contr.treatment", "contr.poly"))
> tens <- with(warpbreaks, C(tension, poly, 1))
> ## tension SHOULD be an ordered factor, but as it is not, we can use
> aov(breaks ~ wool + tens + tension, data = warpbreaks)
Call:
   aov(formula = breaks ~ wool + tens + tension, data = warpbreaks)

Terms:
                    wool     tens  tension Residuals
Sum of Squares   450.667 1950.694   83.565  6747.889
Deg. of Freedom        1        1        1        50

Residual standard error: 11.61713
1 out of 5 effects not estimable
Estimated effects may be unbalanced
> 
> ## show the use of '...' (the dots argument).  The default contrast is contr.treatment here
> summary(lm(breaks ~ wool + C(tension, base = 2), data = warpbreaks))

Call:
lm(formula = breaks ~ wool + C(tension, base = 2), data = warpbreaks)

Residuals:
    Min      1Q  Median      3Q     Max 
-19.500  -8.083  -2.139   6.472  30.722 

Coefficients:
                      Estimate Std. Error t value Pr(>|t|)    
(Intercept)             29.278      3.162   9.260    2e-12 ***
woolB                   -5.778      3.162  -1.827   0.0736 .  
C(tension, base = 2)L   10.000      3.872   2.582   0.0128 *  
C(tension, base = 2)H   -4.722      3.872  -1.219   0.2284    
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

Residual standard error: 11.62 on 50 degrees of freedom
Multiple R-squared:  0.2691,	Adjusted R-squared:  0.2253 
F-statistic: 6.138 on 3 and 50 DF,  p-value: 0.00123

> 
> 
> # following on from help(esoph)
> model3 <- glm(cbind(ncases, ncontrols) ~ agegp + C(tobgp, , 1) +
+      C(alcgp, , 1), data = esoph, family = binomial())
> summary(model3)

Call:
glm(formula = cbind(ncases, ncontrols) ~ agegp + C(tobgp, , 1) + 
    C(alcgp, , 1), family = binomial(), data = esoph)

Coefficients:
                Estimate Std. Error z value Pr(>|z|)    
(Intercept)     -1.15264    0.20326  -5.671 1.42e-08 ***
agegp.L          3.81892    0.67862   5.627 1.83e-08 ***
agegp.Q         -1.49473    0.60671  -2.464   0.0138 *  
agegp.C          0.07923    0.46318   0.171   0.8642    
agegp^4          0.12136    0.32203   0.377   0.7063    
agegp^5         -0.24856    0.21153  -1.175   0.2400    
C(tobgp, , 1).L  0.98287    0.21519   4.568 4.93e-06 ***
C(alcgp, , 1).L  2.38736    0.23462  10.175  < 2e-16 ***
---
Signif. codes:  0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1

(Dispersion parameter for binomial family taken to be 1)

    Null deviance: 367.953  on 87  degrees of freedom
Residual deviance:  91.121  on 80  degrees of freedom
AIC: 222.18

Number of Fisher Scoring iterations: 6

> 
> 
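## Editor's sketch (not in the original transcript): C() works by
## attaching a "contrasts" attribute to the factor, which the
## model-fitting functions above then pick up.
attr(tens, "contrasts")  # the single linear polynomial contrast column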
> 
> base::options(contrasts = c(unordered = "contr.treatment",ordered = "contr.poly"))
> ### * <FOOTER>
> ###
> cleanEx()
> options(digits = 7L)
> base::cat("Time elapsed: ", proc.time() - base::get("ptime", pos = 'CheckExEnv'),"\n")
Time elapsed:  2.846 0.215 3.067 0 0 
> grDevices::dev.off()
null device 
          1 
> ###
> ### Local variables: ***
> ### mode: outline-minor ***
> ### outline-regexp: "\\(> \\)?### [*]+" ***
> ### End: ***
> quit('no')