# grad cases 1 and 2 are special cases of jacobian, with a scalar rather than
#   vector-valued function. Case 3 differs only because of the interpretation
#   that the vector result is a scalar function applied to each element of the
#   argument, and thus the result has the same length as the argument.
#   The code of grad could be consolidated to use jacobian.
#   There is also some duplication in genD.

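# Illustrative sketch (comments only; grad and jacobian are defined below): for a
# scalar-valued function the two agree up to numerical error, e.g.
#    grad(function(x) sum(x^2), c(1, 2))         is approximately c(2, 4), and
#    c(jacobian(function(x) sum(x^2), c(1, 2)))  gives the same values.
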
############################################################################

#    functions for gradient calculation

############################################################################

grad <- function (func, x, method="Richardson", side=NULL,
   method.args=list(), ...) UseMethod("grad")

grad.default <- function(func, x, method="Richardson", side=NULL,
      method.args=list(), ...){
  # modified by Paul Gilbert from code by Xingqiao Liu.
  # case 1/ scalar arg, scalar result (case 2/ or 3/ code should work)
  # case 2/ vector arg, scalar result (same as a special case of jacobian)
  # case 3/ vector arg, vector result (of same length; really case 1/ applied elementwise)
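  #   e.g. (illustrative functions only):
  #     case 1/  func <- function(x) x^2       with scalar x
  #     case 2/  func <- function(x) sum(x^2)  with vector x
  #     case 3/  func <- sin                   with vector x (applied elementwise)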
  f <- func(x, ...)
  n <- length(x)   # number of variables in argument

  if (is.null(side)) side <- rep(NA, n)
  else {
       if(n != length(side))
          stop("Non-NULL argument 'side' should have the same length as x")
       if(any(1 != abs(side[!is.na(side)])))
          stop("Non-NULL argument 'side' should have values NA, +1, or -1.")
       }

  case1or3 <- n == length(f)

  if((1 != length(f)) & !case1or3)
     stop("grad assumes a scalar valued function.")

  if(method=="simple"){
    #  very simple numerical approximation
    args <- list(eps=1e-4) # default
    args[names(method.args)] <- method.args

    side[is.na(side)] <- 1
    eps <- rep(args$eps, n) * side

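    # one-sided difference: (func(x + eps) - f)/eps, where the sign of eps (taken
    # from 'side') picks the side of x on which the step is taken; the truncation
    # error is O(eps)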
    if(case1or3) return((func(x+eps, ...)-f)/eps)

    # now case 2
    df <- rep(NA,n)
    for (i in 1:n) {
      dx <- x
      dx[i] <- dx[i] + eps[i]
      df[i] <- (func(dx, ...) - f)/eps[i]
     }
    return(df)
    }
  else if(method=="complex"){ # Complex step gradient
    if (any(!is.na(side))) stop("method 'complex' does not support non-NULL argument 'side'.")
    eps <- .Machine$double.eps
    v <- try(func(x + eps * 1i, ...))
    if(inherits(v, "try-error"))
      stop("function does not accept complex argument as required by method 'complex'.")
    if(!is.complex(v))
      stop("function does not return a complex value as required by method 'complex'.")

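    # complex step: for an analytic func, func(x + 1i*eps) = func(x) + 1i*eps*func'(x) + O(eps^2),
    # so Im(func(x + 1i*eps))/eps approximates func'(x) without subtractive cancellation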
    if(case1or3) return(Im(v)/eps)
    # now case 2
    h0 <- rep(0, n)
    g  <- rep(NA, n)
    for (i in 1:n) {
      h0[i] <- eps * 1i
      g[i] <- Im(func(x+h0, ...))/eps
      h0[i] <- 0
      }
    return(g)
    }
  else if(method=="Richardson"){
    args <- list(eps=1e-4, d=0.0001, zero.tol=sqrt(.Machine$double.eps/7e-7),
                 r=4, v=2, show.details=FALSE) # default
    args[names(method.args)] <- method.args
    d <- args$d
    r <- args$r
    v <- args$v
    show.details <- args$show.details
    a <- matrix(NA, r, n)
    #b <- matrix(NA, (r - 1), n)

    #  first order derivatives are stored in the matrix a[k,i], where the
    #  indexing variable k runs over the rows (1 to r) and i over the columns
    #  (1 to n); r is the number of iterations and n is the number of variables.

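    # initial step size h: proportional to |x|, but at least args$eps where a
    # component of x is (near) zero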
    h <- abs(d*x) + args$eps * (abs(x) < args$zero.tol)
    pna <- (side ==  1) & !is.na(side) # double the step on the plus side for these
    mna <- (side == -1) & !is.na(side) # double the step on the minus side for these

    for(k in 1:r)  { # successively reduce h
       ph <- mh <- h
       ph[pna] <- 2 * ph[pna]
       ph[mna] <- 0
       mh[mna] <- 2 * mh[mna]
       mh[pna] <- 0

       if(case1or3)  a[k,] <- (func(x + ph, ...) - func(x - mh, ...))/(2*h)
       else for(i in 1:n)  {
         if((k != 1) && (abs(a[(k-1),i]) < 1e-20)) a[k,i] <- 0 # some functions are unstable near zero
         else  a[k,i] <- (func(x + ph*(i==seq(n)), ...) -
                          func(x - mh*(i==seq(n)), ...))/(2*h[i])
         }
       if (any(is.na(a[k,]))) stop("function returns NA at ", h, " distance from x.")
       h <- h/v     # reduce h by a factor of 1/v
       }

    if(show.details)  {
        cat("\n","first order approximations", "\n")
        print(a, 12)
    }

  #------------------------------------------------------------------------
  # 1 Apply Richardson extrapolation to improve the accuracy of the first
  #   order derivatives. The algorithm is as follows:
  #
  #   -- For each column of the derivative matrix a, say A1, A2, ..., Ar,
  #      Richardson extrapolation calculates a new, shorter sequence of
  #      approximations B1, B2, ..., B(r-m) using the formula
  #
  #         B(i) = ( A(i+1)*4^m - A(i) ) / (4^m - 1),  i = 1, 2, ..., r-m
  #
  #      N.B. This formula assumes v = 2.
  #
  #   -- Initially m is taken as 1, and the process is repeated, restarting
  #      with the latest improved values and increasing m by one each time,
  #      until m equals r-1.
  #
  # 2 Display the improved derivatives for each m from 1 to r-1 if the
  #   argument show.details=TRUE.
  #
  # 3 Return the final improved derivative vector.
  #-------------------------------------------------------------------------
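  # For example, with the defaults r=4 and v=2, one column A1, A2, A3, A4 of a is
  # improved in three passes of the loop below:
  #    m=1:  B1 = (A2*4  - A1)/3,    B2 = (A3*4  - A2)/3,   B3 = (A4*4 - A3)/3
  #    m=2:  C1 = (B2*16 - B1)/15,   C2 = (B3*16 - B2)/15
  #    m=3:  D1 = (C2*64 - C1)/63
  # and D1 is the value returned for that column.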

    for(m in 1:(r - 1)) {
       a <- (a[2:(r+1-m),,drop=FALSE]*(4^m)-a[1:(r-m),,drop=FALSE])/(4^m-1)
       if(show.details & m!=(r-1) )  {
          cat("\n","Richardson improvement group No. ", m, "\n")
          print(a[1:(r-m),,drop=FALSE], 12)
        }
     }
    return(c(a))
  } else stop("indicated method ", method, " not supported.")
}
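
# A minimal usage sketch (illustrative only; wrapped in if(FALSE) so it is not run
# when this file is sourced). The expected values shown are approximate.
if(FALSE) {
   grad(sin, pi)                         # case 1: approximately cos(pi) = -1
   grad(function(x) sum(x^2), c(1, 2))   # case 2: approximately c(2, 4)
   grad(sin, c(0, pi/2, pi))             # case 3: approximately cos(c(0, pi/2, pi))
   grad(exp, 1, method="complex")        # complex step: exp(1) to near machine precision
}
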
jacobian <- function (func, x, method="Richardson", side=NULL,
                      method.args=list(), ...) UseMethod("jacobian")

jacobian.default <- function(func, x, method="Richardson", side=NULL,
      method.args=list(), ...){
  f <- func(x, ...)
  n <- length(x)   # number of variables

  if (is.null(side)) side <- rep(NA, n)
  else {
       if(n != length(side))
          stop("Non-NULL argument 'side' should have the same length as x")
       if(any(1 != abs(side[!is.na(side)])))
          stop("Non-NULL argument 'side' should have values NA, +1, or -1.")
       }

  if(method=="simple"){
    #  very simple numerical approximation
    args <- list(eps=1e-4) # default
    args[names(method.args)] <- method.args

    side[is.na(side)] <- 1
    eps <- rep(args$eps, n) * side

    df <- matrix(NA, length(f), n)
    for (i in 1:n) {
      dx <- x
      dx[i] <- dx[i] + eps[i]
      df[,i] <- (func(dx, ...) - f)/eps[i]
     }
    return(df)
    }
  else if(method=="complex"){ # Complex step Jacobian
    if (any(!is.na(side))) stop("method 'complex' does not support non-NULL argument 'side'.")
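    # column i of the Jacobian is Im(func(x + 1i*eps*e_i, ...))/eps, where e_i is the
    # i-th unit vector; the first column below also serves as a check that func
    # accepts and returns complex values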
    eps <- .Machine$double.eps
    h0 <- rep(0, n)
    h0[1] <- eps * 1i
    v <- try(func(x+h0, ...))
    if(inherits(v, "try-error"))
      stop("function does not accept complex argument as required by method 'complex'.")
    if(!is.complex(v))
      stop("function does not return a complex value as required by method 'complex'.")

    h0[1] <- 0
    jac <- matrix(NA, length(v), n)
    jac[, 1] <- Im(v)/eps
    if (n == 1) return(jac)
    for (i in 2:n) {
      h0[i] <- eps * 1i
      jac[, i] <- Im(func(x+h0, ...))/eps
      h0[i] <- 0
      }
    return(jac)
    }
  else if(method=="Richardson"){
    args <- list(eps=1e-4, d=0.0001, zero.tol=sqrt(.Machine$double.eps/7e-7),
                 r=4, v=2, show.details=FALSE) # default
    args[names(method.args)] <- method.args
    d <- args$d
    r <- args$r
    v <- args$v
    a <- array(NA, c(length(f), r, n))

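    # initial step size h: proportional to |x|, but at least args$eps where a
    # component of x is (near) zero; column i of the Jacobian is estimated by central
    # differences along the i-th coordinate and refined by Richardson extrapolation below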
    h <- abs(d*x) + args$eps * (abs(x) < args$zero.tol)
    pna <- (side ==  1) & !is.na(side) # double the step on the plus side for these
    mna <- (side == -1) & !is.na(side) # double the step on the minus side for these

    for(k in 1:r)  { # successively reduce h
       ph <- mh <- h
       ph[pna] <- 2 * ph[pna]
       ph[mna] <- 0
       mh[mna] <- 2 * mh[mna]
       mh[pna] <- 0

       for(i in 1:n)  {
         a[,k,i] <- (func(x + ph*(i==seq(n)), ...) -
                     func(x - mh*(i==seq(n)), ...))/(2*h[i])
         #if((k != 1)) a[,(abs(a[,(k-1),i]) < 1e-20)] <- 0 # some functions are unstable near zero
         }
       h <- h/v     # reduce h by a factor of 1/v
       }

    for(m in 1:(r - 1)) {
       a <- (a[,2:(r+1-m),,drop=FALSE]*(4^m)-a[,1:(r-m),,drop=FALSE])/(4^m-1)
     }
    # drop the second dimension of a, which is now 1 (but keep the other
    # dimensions, even if they are also 1)
    return(array(a, dim(a)[c(1,3)]))
  } else stop("indicated method ", method, " not supported.")
}
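
# A minimal usage sketch (illustrative only; wrapped in if(FALSE) so it is not run
# when this file is sourced). For the hypothetical func below the exact Jacobian
# at c(1, 2) is rbind(c(2, 4), c(2, 1)).
if(FALSE) {
   func <- function(x) c(sum(x^2), prod(x))
   jacobian(func, c(1, 2))                    # Richardson extrapolation (default)
   jacobian(func, c(1, 2), method="simple")   # quick forward-difference version
   jacobian(func, c(1, 2), method="complex")  # near machine precision
}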