R version 3.2.2 RC (2015-08-08 r68914) -- "Fire Safety"
Copyright (C) 2015 The R Foundation for Statistical Computing
Platform: x86_64-pc-linux-gnu (64-bit)

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.

> #### "All the examples" from ./R-intro.texi
> #### -- in a way that this should be(come) an executable script.
> 
> options(digits=5, width=65)##--- for outputs !
> options(stringsAsFactors=TRUE) ## factory-fresh defaults
> options(useFancyQuotes=FALSE) ## avoid problems on Windows
> 
> ## 2. Simple Manipulations
> 
> x <- c(10.4, 5.6, 3.1, 6.4, 21.7)
> assign("x", c(10.4, 5.6, 3.1, 6.4, 21.7))
> c(10.4, 5.6, 3.1, 6.4, 21.7) -> x
> .Last.value
[1] 10.4  5.6  3.1  6.4 21.7
> 1/x
[1] 0.096154 0.178571 0.322581 0.156250 0.046083
> 
> y <- c(x, 0, x)
> v <- 2*x + y + 1
Warning message:
In 2 * x + y :
  longer object length is not a multiple of shorter object length
> ##- Warning message:
> ##- longer object length
> ##-     is not a multiple of shorter object length in: 2 * x + y
> 
> sqrt(-17)
[1] NaN
Warning message:
In sqrt(-17) : NaNs produced
> ##- [1] NaN
> ##- Warning message:
> ##- NaNs produced in: sqrt(-17)
> 
> sqrt(-17+0i)
[1] 0+4.1231i
> 
> ###-- 2.3 .. regular sequences
> 
> 1:30
 [1]  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20
[21] 21 22 23 24 25 26 27 28 29 30
> 
> n <- 10
> 
> 1:n-1
 [1] 0 1 2 3 4 5 6 7 8 9
> 1:(n-1)
[1] 1 2 3 4 5 6 7 8 9
> 30:1
 [1] 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11
[21] 10  9  8  7  6  5  4  3  2  1
> 
> seq(2,10)
[1]  2  3  4  5  6  7  8  9 10
> all(seq(1,30) == seq(to=30, from=1))
[1] TRUE
> 
> seq(-5, 5, by=.2) -> s3
> s4 <- seq(length=51, from=-5, by=.2)
> all.equal(s3,s4)
[1] TRUE
> 
> s5 <- rep(x, times=5)
> s6 <- rep(x, each=5)
> 
> temp <- x > 13
> 
> z <- c(1:3,NA); ind <- is.na(z)
> 
> 0/0
[1] NaN
> Inf - Inf
[1] NaN
> 
> labs <- paste(c("X","Y"), 1:10, sep="")
> labs
 [1] "X1"  "Y2"  "X3"  "Y4"  "X5"  "Y6"  "X7"  "Y8"  "X9"  "Y10"
> 
> x <- c(z,z-2)#-- NOT in texi ; more interesting
> y <- x[!is.na(x)]
> 
> (x+1)[(!is.na(x)) & x>0] -> z
> z
[1] 2 3 4 2
> 
> x <- c(x, 9:12)# long enough:
> x[1:10]
 [1]  1  2  3 NA -1  0  1 NA  9 10
> 
> c("x","y")[rep(c(1,2,2,1), times=4)]
 [1] "x" "y" "y" "x" "x" "y" "y" "x" "x" "y" "y" "x" "x" "y" "y"
[16] "x"
> 
> y <- x[-(1:5)]
> y
[1]  0  1 NA  9 10 11 12
> 
> fruit <- c(5, 10, 1, 20)
> names(fruit) <- c("orange", "banana", "apple", "peach")
> fruit
orange banana  apple  peach
     5     10      1     20
> 
> lunch <- fruit[c("apple","orange")]
> lunch
 apple orange
     1      5
> 
> x
 [1]  1  2  3 NA -1  0  1 NA  9 10 11 12
> x[is.na(x)] <- 0
> x
 [1]  1  2  3  0 -1  0  1  0  9 10 11 12
> 
> y <- -4:9
> y[y < 0] <- -y[y < 0]
> all(y == abs(y))
[1] TRUE
> y
 [1] 4 3 2 1 0 1 2 3 4 5 6 7 8 9
> 
> ###---------------
> 
> z <- 0:9
> digits <- as.character(z)
> digits
 [1] "0" "1" "2" "3" "4" "5" "6" "7" "8" "9"
> d <- as.integer(digits)
> all.equal(z, d)
[1] TRUE
> 
> e <- numeric()
> e[3] <- 17
> e
[1] NA NA 17
> 
> alpha <- 10*(1:10)
> alpha <- alpha[2 * 1:5]
> alpha
[1]  20  40  60  80 100
> 
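## NOT in texi -- added, unevaluated sketch: the same elements of the
## 'fruit' vector defined above can be picked out by position, by name,
## or by a logical mask ('sel.*' are names invented here):
sel.pos  <- fruit[c(3, 1)]                                # positional index
sel.name <- fruit[c("apple", "orange")]                   # character index
sel.log  <- fruit[names(fruit) %in% c("apple", "orange")] # logical index
## the logical form returns the elements in storage order, so compare as sets:
stopifnot(identical(sel.pos, sel.name),
          setequal(names(sel.log), names(sel.pos)))
rm(sel.pos, sel.name, sel.log)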
> winter <- data.frame(temp = c(-1,3,2,-2), cat = rep(c("A","B"), 2))
> winter
  temp cat
1   -1   A
2    3   B
3    2   A
4   -2   B
> unclass(winter)
$temp
[1] -1  3  2 -2

$cat
[1] A B A B
Levels: A B

attr(,"row.names")
[1] 1 2 3 4
> 
> ###------------ Ordered and unordered factors --------
> state <- c("tas", "sa", "qld", "nsw", "nsw", "nt", "wa", "wa",
+            "qld", "vic", "nsw", "vic", "qld", "qld", "sa", "tas",
+            "sa", "nt", "wa", "vic", "qld", "nsw", "nsw", "wa",
+            "sa", "act", "nsw", "vic", "vic", "act")
> statef <- factor(state)
> statef
 [1] tas sa  qld nsw nsw nt  wa  wa  qld vic nsw vic qld qld sa
[16] tas sa  nt  wa  vic qld nsw nsw wa  sa  act nsw vic vic act
Levels: act nsw nt qld sa tas vic wa
> 
> levels(statef)
[1] "act" "nsw" "nt"  "qld" "sa"  "tas" "vic" "wa"
> 
> incomes <- c(60, 49, 40, 61, 64, 60, 59, 54, 62, 69, 70, 42, 56,
+              61, 61, 61, 58, 51, 48, 65, 49, 49, 41, 48, 52, 46,
+              59, 46, 58, 43)
> 
> incmeans <- tapply(incomes, statef, mean)
> incmeans
   act    nsw     nt    qld     sa    tas    vic     wa
44.500 57.333 55.500 53.600 55.000 60.500 56.000 52.250
> 
> stderr <- function(x) sqrt(var(x)/length(x))
> 
> incster <- tapply(incomes, statef, stderr)
> incster
   act    nsw     nt    qld     sa    tas    vic     wa
1.5000 4.3102 4.5000 4.1061 2.7386 0.5000 5.2440 2.6575
> 
> ##
> z <- 1:1500
> dim(z) <- c(3,5,100)
> 
> 
> x <- array(1:20,dim=c(4,5)) # Generate a 4 by 5 array.
> x
     [,1] [,2] [,3] [,4] [,5]
[1,]    1    5    9   13   17
[2,]    2    6   10   14   18
[3,]    3    7   11   15   19
[4,]    4    8   12   16   20
> 
> i <- array(c(1:3,3:1),dim=c(3,2))
> i # @code{i} is a 3 by 2 index array.
     [,1] [,2]
[1,]    1    3
[2,]    2    2
[3,]    3    1
> 
> x[i] # Extract those elements
[1] 9 6 3
> 
> x[i] <- 0 # Replace those elements by zeros.
> x
     [,1] [,2] [,3] [,4] [,5]
[1,]    1    5    0   13   17
[2,]    2    0   10   14   18
[3,]    0    7   11   15   19
[4,]    4    8   12   16   20
> 
> n <- 60
> b <- 5 ; blocks <- rep(1:b, length= n)
> v <- 6 ; varieties <- gl(v,10)
> 
> Xb <- matrix(0, n, b)
> Xv <- matrix(0, n, v)
> ib <- cbind(1:n, blocks)
> iv <- cbind(1:n, varieties)
> Xb[ib] <- 1
> Xv[iv] <- 1
> X <- cbind(Xb, Xv)
> 
> N <- crossprod(Xb, Xv)
> table(blocks,varieties)
      varieties
blocks 1 2 3 4 5 6
     1 2 2 2 2 2 2
     2 2 2 2 2 2 2
     3 2 2 2 2 2 2
     4 2 2 2 2 2 2
     5 2 2 2 2 2 2
> all(N == table(blocks,varieties))
[1] TRUE
> 
> h <- 1:17
> Z <- array(h, dim=c(3,4,2))
> ## If the size of 'h' is exactly 24
> h <- rep(h, length = 24)
> Z. <- Z ## the result is the same as
> Z <- h; dim(Z) <- c(3,4,2)
> stopifnot(identical(Z., Z))
> 
> Z <- array(0, c(3,4,2))
> 
> ## So if @code{A}, @code{B} and @code{C} are all similar arrays
> ##
> A <- matrix(1:6, 3,2)
> B <- cbind(1, 1:3)
> C <- rbind(1, rbind(2, 3:4))
> stopifnot(dim(A) == dim(B),
+           dim(B) == dim(C))
> ##
> D <- 2*A*B + C + 1
> 
> a <- 1:9
> b <- 10*(1:3)
> 
> ab <- a %o% b
> stopifnot(ab == outer(a,b,"*"),
+           ab == outer(a,b))
> 
> x <- 1:10
> y <- -2:2
> f <- function(x, y) cos(y)/(1 + x^2)
> z <- outer(x, y, f)
> 
> 
> d <- outer(0:9, 0:9)
> fr <- table(outer(d, d, "-"))
> plot(as.numeric(names(fr)), fr, type="h",
+      xlab="Determinant", ylab="Frequency")
> 
> ##
> 
> B <- aperm(A, c(2,1))
> stopifnot(identical(B, t(A)))
> 
> ## for example, @code{A} and @code{B} are square matrices of the same size
> ##
> A <- matrix(1:4, 2,2)
> B <- A - 1
> ##
> 
> A * B
     [,1] [,2]
[1,]    0    6
[2,]    2   12
> 
> A %*% B
     [,1] [,2]
[1,]    3   11
[2,]    4   16
> 
> 
> ##
> x <- c(-1, 2)
> ##
> x %*% A %*% x
     [,1]
[1,]    7
> 
> x %*% x
     [,1]
[1,]    5
> stopifnot(x %*% x == sum(x^2))
> 
> xxT <- cbind(x) %*% x
> xxT
     [,1] [,2]
[1,]    1   -2
[2,]   -2    4
> stopifnot(identical(xxT, x %*% rbind(x)))
> 
> ## crossprod ... (ADD)
> 
> ## diag ... (ADD)
> 
> ## linear equations ... (ADD)
> 
> ## solve ... (ADD)
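## NOT in texi -- added, unevaluated sketch for the "(ADD)" placeholders
## above; 'Am', 'bv' and 'xv' are small objects invented here:
Am <- matrix(c(2, 1, 1, 3), 2, 2)       # a nonsingular symmetric matrix
bv <- c(1, 2)
stopifnot(all.equal(crossprod(Am), t(Am) %*% Am))  # crossprod()
stopifnot(all.equal(diag(2), diag(nrow = 2)))      # 2 x 2 identity matrix
xv <- solve(Am, bv)                 # solve the linear system  Am %*% xv = bv
stopifnot(all.equal(drop(Am %*% xv), bv),
          all.equal(solve(Am) %*% Am, diag(2)))    # solve(Am) is the inverse
rm(Am, bv, xv)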
> 
> ## eigen:
> ## a symmetric matrix @code{Sm}
> ##
> Sm <- matrix(-2:6, 3); Sm <- (Sm + t(Sm))/4; Sm
     [,1] [,2] [,3]
[1,]   -1    0    1
[2,]    0    1    2
[3,]    1    2    3
> ##
> ev <- eigen(Sm)
> 
> evals <- eigen(Sm)$values
> 
> ## SVD .....
> 
> ## "if M is in fact square, then, ..."
> ##
> M <- cbind(1,1:3,c(5,2,3))
> X <- cbind(1:9, .25*(-4:4)^2)
> X1 <- cbind(1:7, -1)
> X2 <- cbind(0,2:8)
> y <- c(1:4, 2:6)
> ##
> 
> absdetM <- prod(svd(M)$d)
> stopifnot(all.equal(absdetM, abs(det(M))))# since det() nowadays exists
> 
> ans <- lsfit(X, y)
> 
> Xplus <- qr(X)
> b <- qr.coef(Xplus, y)
> fit <- qr.fitted(Xplus, y)
> res <- qr.resid(Xplus, y)
> ##
> 
> X <- cbind(1, X1, X2)
> 
> vec <- as.vector(X)
> vec <- c(X)
> 
> statefr <- table(statef)
> statefr
statef
act nsw  nt qld  sa tas vic  wa
  2   6   2   5   4   2   5   4
> statefr <- tapply(statef, statef, length)
> statefr
act nsw  nt qld  sa tas vic  wa
  2   6   2   5   4   2   5   4
> 
> factor(cut(incomes, breaks = 35+10*(0:7))) -> incomef
> table(incomef,statef)
         statef
incomef   act nsw nt qld sa tas vic wa
  (35,45]   1   1  0   1  0   0   1  0
  (45,55]   1   1  1   1  2   0   1  3
  (55,65]   0   3  1   3  2   2   2  1
  (65,75]   0   1  0   0  0   0   1  0
> 
> ###--- @chapter 6. Lists and data frames
> 
> Lst <- list(name="Fred", wife="Mary", no.children=3,
+             child.ages=c(4,7,9))
> Lst$name
[1] "Fred"
> Lst$wife
[1] "Mary"
> Lst$child.ages[1]
[1] 4
> stopifnot(Lst$name == Lst[[1]], Lst[[1]] == "Fred",
+           Lst$child.ages[1] == Lst[[4]][1], Lst[[4]][1] == 4
+           )
> 
> x <- "name" ; Lst[[x]]
[1] "Fred"
> 
> ## @section 6.2 Constructing and modifying lists
> 
> ##
> Mat <- cbind(1, 2:4)
> ##
> Lst[5] <- list(matrix=Mat)
> 
> ## @section 6.3 Data frames
> 
> accountants <- data.frame(home=statef, loot=incomes, shot=incomef)
> ## MM: add the next lines to R-intro.texi !
> accountants
   home loot    shot
1   tas   60 (55,65]
2    sa   49 (45,55]
3   qld   40 (35,45]
4   nsw   61 (55,65]
5   nsw   64 (55,65]
6    nt   60 (55,65]
7    wa   59 (55,65]
8    wa   54 (45,55]
9   qld   62 (55,65]
10  vic   69 (65,75]
11  nsw   70 (65,75]
12  vic   42 (35,45]
13  qld   56 (55,65]
14  qld   61 (55,65]
15   sa   61 (55,65]
16  tas   61 (55,65]
17   sa   58 (55,65]
18   nt   51 (45,55]
19   wa   48 (45,55]
20  vic   65 (55,65]
21  qld   49 (45,55]
22  nsw   49 (45,55]
23  nsw   41 (35,45]
24   wa   48 (45,55]
25   sa   52 (45,55]
26  act   46 (45,55]
27  nsw   59 (55,65]
28  vic   46 (45,55]
29  vic   58 (55,65]
30  act   43 (35,45]
> str(accountants)
'data.frame':   30 obs. of  3 variables:
 $ home: Factor w/ 8 levels "act","nsw","nt",..: 6 5 4 2 2 3 8 8 4 7 ...
 $ loot: num  60 49 40 61 64 60 59 54 62 69 ...
 $ shot: Factor w/ 4 levels "(35,45]","(45,55]",..: 3 2 1 3 3 3 3 2 3 4 ...
> 
> ## ..........
> 
> ###--- @chapter 8. Probability distributions
> 
> ## 2-tailed p-value for t distribution
> 2*pt(-2.43, df = 13)
[1] 0.030331
> ## upper 1% point for an F(2, 7) distribution
> qf(0.01, 2, 7, lower.tail = FALSE)
[1] 9.5466
> 
> attach(faithful)
> summary(eruptions)
   Min. 1st Qu.  Median    Mean 3rd Qu.    Max.
   1.60    2.16    4.00    3.49    4.45    5.10
> 
> fivenum(eruptions)
[1] 1.6000 2.1585 4.0000 4.4585 5.1000
> 
> stem(eruptions)

  The decimal point is 1 digit(s) to the left of the |

  16 | 070355555588
  18 | 000022233333335577777777888822335777888
  20 | 00002223378800035778
  22 | 0002335578023578
  24 | 00228
  26 | 23
  28 | 080
  30 | 7
  32 | 2337
  34 | 250077
  36 | 0000823577
  38 | 2333335582225577
  40 | 0000003357788888002233555577778
  42 | 03335555778800233333555577778
  44 | 02222335557780000000023333357778888
  46 | 0000233357700000023578
  48 | 00000022335800333
  50 | 0370

> 
> hist(eruptions)
> 
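## NOT in texi -- added note (unevaluated sketch): summary() above shows
## quantile()-based quartiles while fivenum() shows Tukey's hinges, so the
## two sets of numbers need not agree exactly; side by side:
print(rbind(summary = summary(eruptions)[c(2, 5)],
            fivenum = fivenum(eruptions)[c(2, 4)]))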
> ## postscript("images/hist.eps", ...)
> # make the bins smaller, make a plot of density
> hist(eruptions, seq(1.6, 5.2, 0.2), prob=TRUE)
> lines(density(eruptions, bw=0.1))
> rug(eruptions) # show the actual data points
> ## dev.off()
> 
> plot(ecdf(eruptions), do.points=FALSE, verticals=TRUE)
> 
> ## postscript("images/ecdf.eps", ...)
> long <- eruptions[eruptions > 3]
> plot(ecdf(long), do.points=FALSE, verticals=TRUE)
> x <- seq(3, 5.4, 0.01)
> lines(x, pnorm(x, mean=mean(long), sd=sqrt(var(long))), lty=3)
> ## dev.off()
> 
> par(pty="s") # arrange for a square figure region
> qqnorm(long); qqline(long)
> 
> x <- rt(250, df = 5)
> qqnorm(x); qqline(x)
> 
> qqplot(qt(ppoints(250), df = 5), x, xlab = "Q-Q plot for t dsn")
> qqline(x)
> 
> shapiro.test(long)

        Shapiro-Wilk normality test

data:  long
W = 0.979, p-value = 0.011

> 
> ks.test(long, "pnorm", mean = mean(long), sd = sqrt(var(long)))

        One-sample Kolmogorov-Smirnov test

data:  long
D = 0.0661, p-value = 0.43
alternative hypothesis: two-sided

Warning message:
In ks.test(long, "pnorm", mean = mean(long), sd = sqrt(var(long))) :
  ties should not be present for the Kolmogorov-Smirnov test
> 
> ##@section One- and two-sample tests
> 
> ## scan() from stdin :
> ## can be cut & pasted, but not parsed and hence not source()d
> ##scn A <- scan()
> ##scn 79.98 80.04 80.02 80.04 80.03 80.03 80.04 79.97
> ##scn 80.05 80.03 80.02 80.00 80.02
> A <- c(79.98, 80.04, 80.02, 80.04, 80.03, 80.03, 80.04, 79.97,
+        80.05, 80.03, 80.02, 80, 80.02)
> ##scn B <- scan()
> ##scn 80.02 79.94 79.98 79.97 79.97 80.03 79.95 79.97
> B <- c(80.02, 79.94, 79.98, 79.97, 79.97, 80.03, 79.95, 79.97)
> 
> ## postscript("images/ice.eps", ...)
> boxplot(A, B)
> ## dev.off()
> 
> t.test(A, B)

        Welch Two Sample t-test

data:  A and B
t = 3.25, df = 12, p-value = 0.0069
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 0.013855 0.070183
sample estimates:
mean of x mean of y
   80.021    79.979

> 
> var.test(A, B)

        F test to compare two variances

data:  A and B
F = 0.584, num df = 12, denom df = 7, p-value = 0.39
alternative hypothesis: true ratio of variances is not equal to 1
95 percent confidence interval:
 0.12511 2.10527
sample estimates:
ratio of variances
           0.58374

> 
> t.test(A, B, var.equal=TRUE)

        Two Sample t-test

data:  A and B
t = 3.47, df = 19, p-value = 0.0026
alternative hypothesis: true difference in means is not equal to 0
95 percent confidence interval:
 0.016691 0.067348
sample estimates:
mean of x mean of y
   80.021    79.979

> 
> wilcox.test(A, B)

        Wilcoxon rank sum test with continuity correction

data:  A and B
W = 89, p-value = 0.0075
alternative hypothesis: true location shift is not equal to 0

Warning message:
In wilcox.test.default(A, B) :
  cannot compute exact p-value with ties
> 
> plot(ecdf(A), do.points=FALSE, verticals=TRUE, xlim=range(A, B))
> plot(ecdf(B), do.points=FALSE, verticals=TRUE, add=TRUE)
> 
> ###--- @chapter Grouping, loops and conditional execution
> 
> 
> ###--- @chapter Writing your own functions
> 
> 
> ###--- @chapter Statistical models in R
> 
> 
> ###--- @chapter Graphical procedures
> 
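## NOT extracted above -- the chapters just flagged contributed no
## executable examples to this script; a minimal, unevaluated sketch in
## the spirit of the twosam() function discussed in "Writing your own
## functions", checked against the equal-variance t test on A and B
## computed earlier:
twosam <- function(y1, y2) {
    n1 <- length(y1); n2 <- length(y2)
    yb1 <- mean(y1);  yb2 <- mean(y2)
    s1 <- var(y1);    s2 <- var(y2)
    s  <- ((n1-1)*s1 + (n2-1)*s2)/(n1+n2-2)
    (yb1 - yb2)/sqrt(s*(1/n1 + 1/n2))   # the pooled two-sample t statistic
}
stopifnot(all.equal(twosam(A, B),
                    unname(t.test(A, B, var.equal = TRUE)$statistic)))
rm(twosam)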
> ###--- @appendix A sample session
> 
> ## "Simulate starting a new R session, by
> rm(list=ls(all=TRUE))
> set.seed(123) # for repeatability
> 
> if(interactive())
+     help.start()
> 
> x <- rnorm(50)
> y <- rnorm(x)
> plot(x, y)
> ls()
[1] "x" "y"
> rm(x, y)
> x <- 1:20
> w <- 1 + sqrt(x)/2
> dummy <- data.frame(x = x, y = x + rnorm(x)*w)
> dummy
    x        y
1   1 -0.06561
2   2  2.43853
3   3  2.53967
4   4  3.30491
5   5  2.98444
6   6  5.89982
7   7  5.17676
8   8  3.97323
9   9  8.04943
10 10 12.37206
11 11  9.47055
12 12 13.66099
13 13  8.46544
14 14 13.84049
15 15 16.52523
16 16 16.90346
17 17 17.32353
18 18 16.00015
19 19 16.29841
20 20 16.68585
> fm <- lm(y ~ x, data=dummy)
> summary(fm)

Call:
lm(formula = y ~ x, data = dummy)

Residuals:
   Min     1Q Median     3Q    Max
-3.540 -1.103 -0.054  1.152  3.262

Coefficients:
            Estimate Std. Error t value Pr(>|t|)
(Intercept)  -0.5431     0.8902   -0.61     0.55
x             0.9653     0.0743   12.99  1.4e-10 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 1.92 on 18 degrees of freedom
Multiple R-squared:  0.904,   Adjusted R-squared:  0.898
F-statistic:  169 on 1 and 18 DF,  p-value: 1.39e-10

> fm1 <- lm(y ~ x, data=dummy, weight=1/w^2)
> summary(fm1)

Call:
lm(formula = y ~ x, data = dummy, weights = 1/w^2)

Weighted Residuals:
    Min      1Q  Median      3Q     Max
-1.3205 -0.4492 -0.0088  0.5088  1.2656

Coefficients:
            Estimate Std. Error t value Pr(>|t|)
(Intercept)  -0.6155     0.6513   -0.94     0.36
x             0.9721     0.0664   14.64  1.9e-11 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.72 on 18 degrees of freedom
Multiple R-squared:  0.922,   Adjusted R-squared:  0.918
F-statistic:  214 on 1 and 18 DF,  p-value: 1.94e-11

> attach(dummy)
The following object is masked _by_ .GlobalEnv:

    x

> lrf <- lowess(x, y)
> plot(x, y)
> lines(x, lrf$y)
> abline(0, 1, lty=3)
> abline(coef(fm))
> abline(coef(fm1), col = "red")
> detach()# dummy
> 
> plot(fitted(fm), resid(fm),
+      xlab="Fitted values",
+      ylab="Residuals",
+      main="Residuals vs Fitted")
> qqnorm(resid(fm), main="Residuals Rankit Plot")
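## NOT in texi -- added, unevaluated sketch: the weighted fit fm1 can be
## checked against the weighted normal equations directly ('Xm', 'W' and
## 'b.wls' are names invented here):
Xm <- model.matrix(fm1)               # the design matrix cbind(1, x)
W  <- diag(1/w^2)                     # the weights used in fm1
b.wls <- solve(t(Xm) %*% W %*% Xm, t(Xm) %*% W %*% dummy$y)
stopifnot(all.equal(drop(b.wls), coef(fm1), check.attributes = FALSE))
rm(Xm, W, b.wls)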
> rm(fm, fm1, lrf, x, dummy)
> 
> 
> filepath <- system.file("data", "morley.tab" , package="datasets")
> if(interactive()) file.show(filepath)
> mm <- read.table(filepath)
> mm
    Expt Run Speed
001    1   1   850
002    1   2   740
003    1   3   900
004    1   4  1070
005    1   5   930
006    1   6   850
007    1   7   950
008    1   8   980
009    1   9   980
010    1  10   880
011    1  11  1000
012    1  12   980
013    1  13   930
014    1  14   650
015    1  15   760
016    1  16   810
017    1  17  1000
018    1  18  1000
019    1  19   960
020    1  20   960
021    2   1   960
022    2   2   940
023    2   3   960
024    2   4   940
025    2   5   880
026    2   6   800
027    2   7   850
028    2   8   880
029    2   9   900
030    2  10   840
031    2  11   830
032    2  12   790
033    2  13   810
034    2  14   880
035    2  15   880
036    2  16   830
037    2  17   800
038    2  18   790
039    2  19   760
040    2  20   800
041    3   1   880
042    3   2   880
043    3   3   880
044    3   4   860
045    3   5   720
046    3   6   720
047    3   7   620
048    3   8   860
049    3   9   970
050    3  10   950
051    3  11   880
052    3  12   910
053    3  13   850
054    3  14   870
055    3  15   840
056    3  16   840
057    3  17   850
058    3  18   840
059    3  19   840
060    3  20   840
061    4   1   890
062    4   2   810
063    4   3   810
064    4   4   820
065    4   5   800
066    4   6   770
067    4   7   760
068    4   8   740
069    4   9   750
070    4  10   760
071    4  11   910
072    4  12   920
073    4  13   890
074    4  14   860
075    4  15   880
076    4  16   720
077    4  17   840
078    4  18   850
079    4  19   850
080    4  20   780
081    5   1   890
082    5   2   840
083    5   3   780
084    5   4   810
085    5   5   760
086    5   6   810
087    5   7   790
088    5   8   810
089    5   9   820
090    5  10   850
091    5  11   870
092    5  12   870
093    5  13   810
094    5  14   740
095    5  15   810
096    5  16   940
097    5  17   950
098    5  18   800
099    5  19   810
100    5  20   870
> mm$Expt <- factor(mm$Expt)
> mm$Run <- factor(mm$Run)
> attach(mm)
> plot(Expt, Speed, main="Speed of Light Data", xlab="Experiment No.")
> fm <- aov(Speed ~ Run + Expt, data=mm)
> summary(fm)
            Df Sum Sq Mean Sq F value Pr(>F)
Run         19 113344    5965    1.11 0.3632
Expt         4  94514   23629    4.38 0.0031 **
Residuals   76 410166    5397
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
> fm0 <- update(fm, . ~ . - Run)
> anova(fm0, fm)
Analysis of Variance Table

Model 1: Speed ~ Expt
Model 2: Speed ~ Run + Expt
  Res.Df    RSS Df Sum of Sq    F Pr(>F)
1     95 523510
2     76 410166 19    113344 1.11   0.36
> detach()
> rm(fm, fm0)
> 
> x <- seq(-pi, pi, len=50)
> y <- x
> f <- outer(x, y, function(x, y) cos(y)/(1 + x^2))
> oldpar <- par(no.readonly = TRUE)
> par(pty="s")
> contour(x, y, f)
> contour(x, y, f, nlevels=15, add=TRUE)
> fa <- (f-t(f))/2
> contour(x, y, fa, nlevels=15)
> par(oldpar)
> image(x, y, f)
> image(x, y, fa)
> objects(); rm(x, y, f, fa)
[1] "f"        "fa"       "filepath" "mm"       "oldpar"
[6] "w"        "x"        "y"
> th <- seq(-pi, pi, len=100)
> z <- exp(1i*th)
> par(pty="s")
> plot(z, type="l")
> w <- rnorm(100) + rnorm(100)*1i
> w <- ifelse(Mod(w) > 1, 1/w, w)
> plot(w, xlim=c(-1,1), ylim=c(-1,1), pch="+",xlab="x", ylab="y")
> lines(z)
> 
> w <- sqrt(runif(100))*exp(2*pi*runif(100)*1i)
> plot(w, xlim=c(-1,1), ylim=c(-1,1), pch="+", xlab="x", ylab="y")
> lines(z)
> 
> rm(th, w, z)
> ## q()
> 
> 
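## NOT in texi -- added, unevaluated sketch appended after the session:
## both constructions of 'w' above keep every point inside the closed unit
## disc; a quick check, re-creating them since w was removed ('w1' and 'w2'
## are names invented here):
w1 <- rnorm(100) + rnorm(100)*1i
w1 <- ifelse(Mod(w1) > 1, 1/w1, w1)   # reflect the outside points inwards
w2 <- sqrt(runif(100)) * exp(2*pi*runif(100)*1i)
stopifnot(all(Mod(w1) <= 1), all(Mod(w2) <= 1))
rm(w1, w2)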