<?php

declare(strict_types=1);

namespace Phpml\Classification\Linear;

use Phpml\Exception\InvalidArgumentException;

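/**
 * Adaline (ADAptive LInear NEuron) classifier.
 *
 * Unlike the classic perceptron rule, which updates weights from the thresholded class
 * prediction, Adaline minimizes the squared error of the continuous linear output via
 * batch or online (stochastic) gradient descent (see runTraining() below).
 */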
class Adaline extends Perceptron
{
    /**
     * Batch training is the default Adaline training algorithm
     */
    public const BATCH_TRAINING = 1;

    /**
     * Online training: stochastic gradient descent learning
     */
    public const ONLINE_TRAINING = 2;

    /**
     * Training type: either batch (self::BATCH_TRAINING) or online (self::ONLINE_TRAINING) learning
     *
     * @var int
     */
    protected $trainingType;

    /**
     * Initialize an Adaline (ADAptive LInear NEuron) classifier with the given learning rate and
     * maximum number of iterations used while training the classifier.
     *
     * Learning rate should be a float value between 0.0 (exclusive) and 1.0 (inclusive).
     * Maximum number of iterations can be an integer value greater than 0.
     * If normalizeInputs is set to true, then every input given to the algorithm will be
     * standardized using the mean and standard deviation of the training samples.
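     *
     * A minimal usage sketch (the sample data below is purely illustrative; train() and
     * predict() are inherited from the Perceptron parent class):
     *
     *     $classifier = new Adaline(0.01, 500, true, Adaline::ONLINE_TRAINING);
     *     $classifier->train([[0.1, 0.2], [0.9, 0.8]], [0, 1]);
     *     $classifier->predict([0.2, 0.3]);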
     *
     * @throws InvalidArgumentException
     */
    public function __construct(
        float $learningRate = 0.001,
        int $maxIterations = 1000,
        bool $normalizeInputs = true,
        int $trainingType = self::BATCH_TRAINING
    ) {
        if (!in_array($trainingType, [self::BATCH_TRAINING, self::ONLINE_TRAINING], true)) {
            throw new InvalidArgumentException('Adaline can only be trained with the batch or online (stochastic) gradient descent algorithm');
        }

        $this->trainingType = $trainingType;

        parent::__construct($learningRate, $maxIterations, $normalizeInputs);
    }

    /**
     * Adapts the weights with respect to the given samples and targets
     * by use of the gradient descent learning rule.
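     *
     * With the squared-error cost used below, the gradient of the cost with respect to a
     * weight w_i is proportional to (output - target) * x_i, so each update moves the weight
     * against that direction by a step scaled by the learning rate.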
     */
    protected function runTraining(array $samples, array $targets): void
    {
        // The cost for each sample is the squared error between the neuron output and the
        // target; the callback returns that error together with its gradient term (output - target)
        $callback = function ($weights, $sample, $target) {
            $this->weights = $weights;

            $output = $this->output($sample);
            $gradient = $output - $target;
            $error = $gradient ** 2;

            return [$error, $gradient];
        };

        $isBatch = $this->trainingType === self::BATCH_TRAINING;

        parent::runGradientDescent($samples, $targets, $callback, $isBatch);
    }
}