% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smqr.R
\name{conquer.reg}
\alias{conquer.reg}
\title{Penalized Convolution-Type Smoothed Quantile Regression}
\usage{
conquer.reg(
  X,
  Y,
  lambda = 0.2,
  tau = 0.5,
  kernel = c("Gaussian", "logistic", "uniform", "parabolic", "triangular"),
  h = 0,
  penalty = c("lasso", "scad", "mcp"),
  para = NULL,
  epsilon = 0.001,
  iteMax = 500,
  phi0 = 0.01,
  gamma = 1.2,
  iteTight = 3
)
}
\arguments{
\item{X}{An \eqn{n} by \eqn{p} design matrix. Each row is an observation vector with \eqn{p} covariates.}

\item{Y}{An \eqn{n}-dimensional response vector.}

\item{lambda}{(\strong{optional}) Regularization parameter. Default is 0.2.}

\item{tau}{(\strong{optional}) Quantile level (between 0 and 1). Default is 0.5.}

\item{kernel}{(\strong{optional}) A character string specifying the choice of kernel function. Default is "Gaussian". Choices are "Gaussian", "logistic", "uniform", "parabolic" and "triangular".}

\item{h}{(\strong{optional}) Bandwidth/smoothing parameter. Default is \eqn{\max\{0.5 * (\log(p) / n)^{0.25}, 0.05\}}.}

\item{penalty}{(\strong{optional}) A character string specifying the penalty. Default is "lasso". The other two options are "scad" and "mcp".}

\item{para}{(\strong{optional}) A constant parameter for "scad" and "mcp"; it is not needed when the penalty is "lasso". The default values are 3.7 for "scad" and 3 for "mcp".}

\item{epsilon}{(\strong{optional}) A tolerance level for the stopping rule. The iteration stops when the maximum magnitude of the change in coefficient updates is less than \code{epsilon}. Default is 0.001.}

\item{iteMax}{(\strong{optional}) Maximum number of iterations. Default is 500.}

\item{phi0}{(\strong{optional}) The initial quadratic coefficient parameter in the local adaptive majorize-minimize algorithm. Default is 0.01.}

\item{gamma}{(\strong{optional}) The adaptive search parameter (greater than 1) in the local adaptive majorize-minimize algorithm. Default is 1.2.}

\item{iteTight}{(\strong{optional}) Maximum number of tightening iterations in the iteratively reweighted \eqn{\ell_1}-penalized algorithm; it is not needed when the penalty is "lasso". Default is 3.}
}
\value{
An object containing the following items will be returned:
\describe{
\item{\code{coeff}}{A \eqn{(p + 1)} vector of estimated coefficients, including the intercept.}
\item{\code{bandwidth}}{Bandwidth value.}
\item{\code{tau}}{Quantile level.}
\item{\code{kernel}}{Kernel function.}
\item{\code{penalty}}{Penalty type.}
\item{\code{lambda}}{Regularization parameter.}
\item{\code{n}}{Sample size.}
\item{\code{p}}{Number of covariates.}
}
}
\description{
Fit sparse quantile regression models in high dimensions via regularized conquer methods with "lasso", "scad" and "mcp" penalties. For "scad" and "mcp", the iteratively reweighted \eqn{\ell_1}-penalized algorithm is complemented with a local adaptive majorize-minimize algorithm.
}
\examples{
n = 200; p = 500; s = 10
beta = c(rep(1.5, s), rep(0, p - s))
X = matrix(rnorm(n * p), n, p)
Y = X \%*\% beta + rt(n, 2)

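## Illustrative sketch (not part of the original example): when h is left at 0,
## the default bandwidth described in the arguments above, max(0.5 * (log(p) / n)^0.25, 0.05),
## is used. The name h.default below is introduced here only for illustration.
h.default = max(0.5 * (log(p) / n)^0.25, 0.05)
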
## Regularized conquer with lasso penalty at tau = 0.8
fit.lasso = conquer.reg(X, Y, lambda = 0.05, tau = 0.8, kernel = "Gaussian", penalty = "lasso")
beta.lasso = fit.lasso$coeff
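
## A quick look at other components of the returned object, following the names
## documented in the Value section above.
fit.lasso$bandwidth
fit.lasso$lambda
fit.lasso$tau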

## Regularized conquer with scad penalty at tau = 0.8
fit.scad = conquer.reg(X, Y, lambda = 0.13, tau = 0.8, kernel = "Gaussian", penalty = "scad")
beta.scad = fit.scad$coeff
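
## Regularized conquer with mcp penalty at tau = 0.8; a minimal sketch mirroring
## the calls above. The value of lambda here is illustrative, not a recommendation.
fit.mcp = conquer.reg(X, Y, lambda = 0.1, tau = 0.8, kernel = "Gaussian", penalty = "mcp")
beta.mcp = fit.mcp$coeff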
}
\references{
Fan, J., Liu, H., Sun, Q. and Zhang, T. (2018). I-LAMM for sparse learning: Simultaneous control of algorithmic complexity and statistical error. Ann. Statist. 46 814-841.

Koenker, R. and Bassett, G. (1978). Regression quantiles. Econometrica 46 33-50.

Tan, K. M., Wang, L. and Zhou, W.-X. (2021). High-dimensional quantile regression: convolution smoothing and concave regularization. J. Roy. Statist. Soc. Ser. B, to appear.
}
\seealso{
See \code{\link{conquer.cv.reg}} for regularized quantile regression with cross-validation.
}
\author{
Xuming He <xmhe@umich.edu>, Xiaoou Pan <xip024@ucsd.edu>, Kean Ming Tan <keanming@umich.edu>, and Wen-Xin Zhou <wez243@ucsd.edu>
}