\name{dknn.train}
\alias{dknn.train}
\title{
Depth-Based kNN
}
\description{
The implementation of the affine-invariant depth-based kNN of Paindaveine and Van Bever (2015).
}
\usage{
dknn.train(data, kMax = -1, depth = "halfspace", seed = 0)
}
\arguments{
  \item{data}{
Matrix containing the training sample, where each of the \eqn{n} rows is one object: the first \eqn{d} entries are the inputs and the last entry is the output (class label).
}
  \item{kMax}{
The maximal number of neighbours. If set to \code{-1}, the default value is calculated as \eqn{n/2}, but is at least 2 and at most \eqn{n-1}.
}
  \item{depth}{
Character string determining which depth notion to use; the default value is \code{"halfspace"}.
Currently the method supports the following depths: \code{"halfspace"}, \code{"Mahalanobis"}, \code{"simplicial"}.
}
  \item{seed}{
The random seed. The default value \code{seed = 0} makes no change to the random number generator.
}
}
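\details{
To sketch the expected layout of \code{data} (the variable names below are illustrative only, not part of the package), each row stacks the \eqn{d} inputs followed by the class label:
\preformatted{
## Two bivariate classes, 10 objects each; the third
## column carries the class labels 1 and 2
x      <- rbind(matrix(rnorm(20),           ncol = 2),
                matrix(rnorm(20, mean = 2), ncol = 2))
labels <- rep(c(1, 2), each = 10)
train  <- cbind(x, labels)
## 'train' is a 20 x 3 matrix that dknn.train() accepts
}
}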
\value{
  The trained Dknn-classifier: an object containing the technical information needed for classification, including the selected optimal value \code{k}.
}
\references{
Paindaveine, D. and Van Bever, G. (2015). Nonparametrically consistent depth-based classifiers. \emph{Bernoulli} \bold{21}, 62--82.
}
\seealso{
\code{\link{dknn.classify}} and \code{\link{dknn.classify.trained}} to classify with the Dknn-classifier.

\code{\link{ddalpha.train}} to train the DD\eqn{\alpha}-classifier.

\code{\link{ddalpha.getErrorRateCV}} and \code{\link{ddalpha.getErrorRatePart}} to get the error rate of the Dknn-classifier on particular data (set \code{separator = "Dknn"}).
}
\examples{

# mvrnorm() comes from the MASS package
library(MASS)

# Generate a bivariate normal location-shift classification task
# containing 200 training objects and 200 to test with
class1 <- mvrnorm(200, c(0,0),
                  matrix(c(1,1,1,4), nrow = 2, ncol = 2, byrow = TRUE))
class2 <- mvrnorm(200, c(2,2),
                  matrix(c(1,1,1,4), nrow = 2, ncol = 2, byrow = TRUE))
trainIndices <- 1:100
testIndices <- 101:200
propertyVars <- 1:2
classVar <- 3
trainData <- rbind(cbind(class1[trainIndices,], rep(1, 100)),
                   cbind(class2[trainIndices,], rep(2, 100)))
testData <- rbind(cbind(class1[testIndices,], rep(1, 100)),
                  cbind(class2[testIndices,], rep(2, 100)))
data <- list(train = trainData, test = testData)

# Train the classifier
# and get the classification error rate
cls <- dknn.train(data$train, kMax = 20, depth = "Mahalanobis")
cls$k
classes1 <- dknn.classify.trained(data$test[,propertyVars], cls)
cat("Classification error rate: ",
    sum(unlist(classes1) != data$test[,classVar])/200)

# Classify the new data based on the old ones in one step
classes2 <- dknn.classify(data$test[,propertyVars], data$train,
                          k = cls$k, depth = "Mahalanobis")
cat("Classification error rate: ",
    sum(unlist(classes2) != data$test[,classVar])/200)
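
# A minimal variant with the default halfspace depth
# (reuses the 'data' object generated above)
clsH <- dknn.train(data$train, kMax = 20)
clsH$k
classesH <- dknn.classify.trained(data$test[,propertyVars], clsH)
cat("Classification error rate: ",
    sum(unlist(classesH) != data$test[,classVar])/200)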

}
\keyword{ multivariate }
\keyword{ nonparametric }
\keyword{ classif }