/dports/science/py-dlib/dlib-19.22/dlib/dnn/
solvers.h
    39:  const float learning_rate,   (in operator())
    64:  const float learning_rate,   (in operator())
    83:  const float learning_rate,   (in operator())
    102: const float learning_rate,   (in operator())
    113: const float learning_rate,   (in operator())
    151: const float learning_rate,   (in update_considering_bias(), argument)
    226: const float learning_rate,   (in operator())
    255: const float learning_rate,   (in operator())
    274: const float learning_rate,   (in operator())
    293: const float learning_rate,   (in operator())
    [all …]
|
/dports/science/dlib-cpp/dlib-19.22/dlib/dnn/
solvers.h
    39:  const float learning_rate,   (in operator())
    64:  const float learning_rate,   (in operator())
    83:  const float learning_rate,   (in operator())
    102: const float learning_rate,   (in operator())
    113: const float learning_rate,   (in operator())
    151: const float learning_rate,   (in update_considering_bias(), argument)
    226: const float learning_rate,   (in operator())
    255: const float learning_rate,   (in operator())
    274: const float learning_rate,   (in operator())
    293: const float learning_rate,   (in operator())
    [all …]
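Both ports ship the same dlib header; every solver object there receives the current learning_rate through operator() on each call. A minimal NumPy sketch of the momentum-SGD update dlib's basic solver performs (names are illustrative, not dlib's API, and the exact weight-decay handling may differ):

    import numpy as np

    def sgd_step(param, grad, velocity, learning_rate,
                 momentum=0.9, weight_decay=0.0005):
        # v <- momentum * v - lr * (weight_decay * w + grad); then w <- w + v
        velocity = momentum * velocity - learning_rate * (weight_decay * param + grad)
        return param + velocity, velocity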
|
/dports/science/py-scikit-learn/scikit-learn-1.0.2/sklearn/linear_model/
_stochastic_gradient.py
    106:  self.learning_rate = learning_rate
    545:  learning_rate=learning_rate,
    615:  learning_rate=learning_rate,
    625:  learning_rate=learning_rate,
    847:  learning_rate=self.learning_rate,
    889:  learning_rate=self.learning_rate,
    1186: learning_rate=learning_rate,
    1368: learning_rate=learning_rate,
    1923: learning_rate=learning_rate,
    2116: learning_rate=learning_rate,
    [all …]
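In this file learning_rate is a schedule name that the public estimators thread down to the fitting routines. A short usage sketch (real scikit-learn API; the toy data is illustrative):

    from sklearn.datasets import make_classification
    from sklearn.linear_model import SGDClassifier

    X, y = make_classification(n_samples=200, random_state=0)
    # learning_rate selects the schedule ('constant', 'optimal',
    # 'invscaling', or 'adaptive'); eta0 is the initial step size.
    clf = SGDClassifier(learning_rate="adaptive", eta0=0.01, max_iter=1000)
    clf.fit(X, y)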
|
/dports/science/py-scikit-learn/scikit-learn-1.0.2/sklearn/neural_network/
_stochastic_optimizers.py
    27:  self.learning_rate = float(learning_rate_init)
    149: self.learning_rate = (
    159: if self.learning_rate <= 1e-6:
    164: self.learning_rate /= 5.0
    166: print(msg + " Setting learning rate to %f" % self.learning_rate)
    184: self.momentum * velocity - self.learning_rate * grad
    191: self.momentum * velocity - self.learning_rate * grad
    280: self.learning_rate = (
    286: -self.learning_rate * m / (np.sqrt(v) + self.epsilon)
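The hits show two things: the MLP 'adaptive' schedule (divide the rate by 5 when the score stops improving, stop once it reaches 1e-6) and the classic momentum velocity update at lines 184/191. A NumPy sketch of that update (names illustrative):

    import numpy as np

    def momentum_updates(params, grads, velocities, learning_rate, momentum=0.9):
        # velocity <- momentum * velocity - learning_rate * grad,
        # then each parameter moves by its velocity.
        new_vel = [momentum * v - learning_rate * g
                   for v, g in zip(velocities, grads)]
        return [p + v for p, v in zip(params, new_vel)], new_vel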
|
/dports/math/py-flax/flax-0.3.3/flax/optim/
lamb.py
    26: learning_rate: np.ndarray
    45: def __init__(self, learning_rate=None, beta1=0.9, beta2=0.999, weight_decay=0,   (argument)
    60: learning_rate, beta1, beta2, weight_decay, eps)
    67: assert hyper_params.learning_rate is not None, 'no learning rate provided.'
    71: learning_rate = hyper_params.learning_rate
    91: new_param = param - trust_ratio * learning_rate * update
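Line 91 is LAMB's distinguishing step: the Adam-style update is rescaled by a per-layer trust ratio before the learning rate is applied. A hedged NumPy sketch (the zero-norm fallback follows the common formulation; flax's exact handling may differ):

    import numpy as np

    def lamb_apply(param, update, learning_rate):
        # trust_ratio = ||param|| / ||update||, defaulting to 1 when
        # either norm is zero, then: param - trust_ratio * lr * update.
        pn, un = np.linalg.norm(param), np.linalg.norm(update)
        trust_ratio = pn / un if pn > 0 and un > 0 else 1.0
        return param - trust_ratio * learning_rate * update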
|
sgd.py
    24: learning_rate: np.ndarray
    30: def __init__(self, learning_rate=None):   (argument)
    36: hyper_params = _GradientDescentHyperParams(learning_rate)
    44: assert hyper_params.learning_rate is not None, 'no learning rate provided.'
    45: new_param = param - hyper_params.learning_rate * grad
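Line 45 is the whole algorithm: plain gradient descent. As a one-line sketch:

    def gradient_descent_step(param, grad, learning_rate):
        # line 45 above: new_param = param - learning_rate * grad
        return param - learning_rate * grad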
|
adadelta.py
    25: learning_rate: float
    46: learning_rate: float = None,
    58: hyper_params = _AdadeltaHyperParams(learning_rate, rho, eps, weight_decay)
    67: assert hyper_params.learning_rate is not None, 'no learning rate provided.'
    76: new_param = param - hyper_params.learning_rate * delta
    77: new_param -= hyper_params.learning_rate * weight_decay * param
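Adadelta rescales each gradient by the ratio of two running averages, so the delta at line 76 is already step-sized; this implementation still multiplies by learning_rate and applies decoupled weight decay (line 77). A hedged NumPy sketch of one textbook Adadelta step (accumulator details may differ from flax's):

    import numpy as np

    def adadelta_step(param, grad, sq_avg, acc_delta, learning_rate=1.0,
                      rho=0.9, eps=1e-6, weight_decay=0.0):
        sq_avg = rho * sq_avg + (1 - rho) * grad**2
        delta = np.sqrt(acc_delta + eps) / np.sqrt(sq_avg + eps) * grad
        acc_delta = rho * acc_delta + (1 - rho) * delta**2
        new_param = param - learning_rate * delta            # line 76
        new_param -= learning_rate * weight_decay * param    # line 77
        return new_param, sq_avg, acc_delta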
|
adagrad.py
    25: learning_rate: float
    38: def __init__(self, learning_rate: float = None, eps=1e-8):
    44: hyper_params = _AdagradHyperParams(learning_rate, eps)
    54: assert hyper_params.learning_rate is not None, 'no learning rate provided.'
    56: new_param = param - hyper_params.learning_rate * grad / (jnp.sqrt(new_G) +
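Adagrad divides the step by the root of the lifetime sum of squared gradients (new_G at line 56), so frequently-updated coordinates get smaller effective rates. A NumPy sketch:

    import numpy as np

    def adagrad_step(param, grad, G, learning_rate, eps=1e-8):
        G = G + grad**2  # accumulated squared gradients
        return param - learning_rate * grad / (np.sqrt(G) + eps), G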
|
adam.py
    27: learning_rate: np.ndarray
    66: learning_rate=None,   (argument)
    84: hyper_params = _AdamHyperParams(learning_rate, beta1, beta2, eps,
    92: assert hyper_params.learning_rate is not None, 'no learning rate provided.'
    106: new_param = param - hyper_params.learning_rate * grad_ema_corr / denom
    107: new_param -= hyper_params.learning_rate * weight_decay * param
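Lines 106-107 show bias-corrected Adam followed by decoupled (AdamW-style) weight decay. A NumPy sketch of one step, where t is the 1-based step count (names illustrative):

    import numpy as np

    def adam_step(param, grad, m, v, t, learning_rate,
                  beta1=0.9, beta2=0.999, eps=1e-8, weight_decay=0.0):
        m = beta1 * m + (1 - beta1) * grad          # first-moment EMA
        v = beta2 * v + (1 - beta2) * grad**2       # second-moment EMA
        m_hat = m / (1 - beta1**t)                  # bias correction
        v_hat = v / (1 - beta2**t)
        new_param = param - learning_rate * m_hat / (np.sqrt(v_hat) + eps)
        new_param -= learning_rate * weight_decay * param   # line 107
        return new_param, m, v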
|
momentum.py
    26: learning_rate: np.ndarray
    40: def __init__(self, learning_rate=None, beta=0.9, weight_decay=0,   (argument)
    53: learning_rate, beta, weight_decay, nesterov)
    61: assert hyper_params.learning_rate is not None, 'no learning rate provided.'
    70: new_param = param - hyper_params.learning_rate * d_p
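d_p at line 70 is either the momentum buffer or, with Nesterov enabled, the gradient plus a look-ahead through the buffer. A hedged sketch of the usual formulation:

    def momentum_step(param, grad, buf, learning_rate,
                      beta=0.9, weight_decay=0.0, nesterov=False):
        if weight_decay:
            grad = grad + weight_decay * param
        buf = beta * buf + grad
        d_p = grad + beta * buf if nesterov else buf
        return param - learning_rate * d_p, buf   # line 70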
|
rmsprop.py
    25: learning_rate: float
    41: def __init__(self, learning_rate: float = None, beta2=0.9, eps=1e-8,
    56: hyper_params = _RMSPropHyperParams(learning_rate, beta2, eps, centered)
    67: assert hyper_params.learning_rate is not None, 'no learning rate provided.'
    76: new_param = param - hyper_params.learning_rate * grad / (
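RMSProp normalizes each gradient by the root of a decaying average of its squares; the centered flag additionally subtracts the squared mean gradient from that average. A NumPy sketch of the uncentered step:

    import numpy as np

    def rmsprop_step(param, grad, v, learning_rate, beta2=0.9, eps=1e-8):
        v = beta2 * v + (1 - beta2) * grad**2
        return param - learning_rate * grad / (np.sqrt(v) + eps), v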
|
lars.py
    26: learning_rate: np.ndarray
    45: def __init__(self, learning_rate=None, beta=0.9, weight_decay=0,   (argument)
    61: learning_rate, beta, weight_decay, trust_coefficient, eps, nesterov)
    69: assert hyper_params.learning_rate is not None, 'no learning rate provided.'
    77: scaled_lr = hyper_params.learning_rate * clipped_trust_ratio
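LARS computes a layer-wise trust ratio and uses it to scale the global learning rate (line 77) before a momentum step. A sketch of the common formulation; the clipping behind clipped_trust_ratio is omitted here and the zero-norm fallback is an assumption:

    import numpy as np

    def lars_scaled_lr(param, grad, learning_rate,
                       trust_coefficient=0.001, weight_decay=0.0, eps=1e-8):
        pn, gn = np.linalg.norm(param), np.linalg.norm(grad)
        if pn == 0.0 or gn == 0.0:
            return learning_rate
        trust_ratio = trust_coefficient * pn / (gn + weight_decay * pn + eps)
        return learning_rate * trust_ratio   # line 77: scaled_lr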
|
/dports/math/py-pymc3/pymc-3.11.4/pymc3/variational/
updates.py
    187: def sgd(loss_or_grads=None, params=None, learning_rate=1e-3):   (argument)
    235: updates[param] = param - learning_rate * grad
    289: def momentum(loss_or_grads=None, params=None, learning_rate=1e-3, momentum=0.9):   (argument)
    345: updates = sgd(loss_or_grads, params, learning_rate)
    465: updates = sgd(loss_or_grads, params, learning_rate)
    469: def adagrad(loss_or_grads=None, params=None, learning_rate=1.0, epsilon=1e-6):   (argument)
    543: updates[param] = param - (learning_rate * grad / tt.sqrt(accu_new + epsilon))
    589: updates[param] = param - (learning_rate * grad / tt.sqrt(accu_sum + epsilon))
    773: updates[param] = param - learning_rate * update
    850: a_t = learning_rate * tt.sqrt(one - beta2 ** t) / (one - beta1 ** t)
    [all …]
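These are Lasagne-style Theano update rules: each function returns an ordered mapping from a shared variable to its next-step expression, which the compiled training function then applies. A runnable sketch of the pattern with plain NumPy values (keyed by position, since NumPy arrays are not hashable the way Theano shared variables are):

    from collections import OrderedDict
    import numpy as np

    def sgd_updates(params, grads, learning_rate=1e-3):
        # line 235 above: updates[param] = param - learning_rate * grad
        return OrderedDict(
            (i, p - learning_rate * g)
            for i, (p, g) in enumerate(zip(params, grads)))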
|
/dports/lang/nim/nim-1.6.2/tests/typerel/
t7734.nim
    3:  learning_rate: T
    6:  learning_rate: T
    15: proc optimizer[M; T: SomeFloat](model: M, _: typedesc[Foo], learning_rate: T): Foo[T] =
    16: result.learning_rate = learning_rate
|
/dports/misc/mxnet/incubator-mxnet-1.9.0/example/gluon/house_prices/
kaggle_k_fold_cross_validation.py
    82:  def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,   (argument)
    89:  {'learning_rate': learning_rate,
    105: learning_rate, weight_decay, batch_size):   (argument)
    130: learning_rate, weight_decay, batch_size)
    142: learning_rate = 0.3   (variable)
    148: learning_rate, weight_decay, batch_size)
    152: def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,   (argument)
    156: _ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
    163: learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
|
/dports/misc/py-mxnet/incubator-mxnet-1.9.0/example/gluon/house_prices/
kaggle_k_fold_cross_validation.py
    82:  def train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,   (argument)
    89:  {'learning_rate': learning_rate,
    105: learning_rate, weight_decay, batch_size):   (argument)
    130: learning_rate, weight_decay, batch_size)
    142: learning_rate = 0.3   (variable)
    148: learning_rate, weight_decay, batch_size)
    152: def learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,   (argument)
    156: _ = train(net, X_train, y_train, epochs, verbose_epoch, learning_rate,
    163: learn(epochs, verbose_epoch, X_train, y_train, test, learning_rate,
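The mxnet and py-mxnet ports carry the same example, which feeds learning_rate (0.3 at line 142) into a Gluon Trainer as an optimizer parameter. A minimal sketch using the real Gluon API (the tiny network is illustrative):

    from mxnet import gluon

    net = gluon.nn.Dense(1)
    net.initialize()
    # line 89 above: the rate travels in the optimizer-params dict.
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': 0.3, 'wd': 0.0})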
|
/dports/math/py-deap/deap-1.3.1/examples/eda/
pbil.py
    27: def __init__(self, ndim, learning_rate, mut_prob, mut_shift, lambda_):   (argument)
    29: self.learning_rate = learning_rate
    45: self.prob_vector[i] *= 1.0 - self.learning_rate
    46: self.prob_vector[i] += value * self.learning_rate
    68: pbil = PBIL(ndim=50, learning_rate=0.3, mut_prob=0.1,
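In PBIL the learning rate is not a gradient step size: lines 45-46 blend each entry of the probability vector toward the corresponding bit of the best sampled individual. A sketch of that update:

    def pbil_update(prob_vector, best_individual, learning_rate=0.3):
        # p <- (1 - lr) * p + lr * best_bit, per position (lines 45-46).
        return [(1.0 - learning_rate) * p + learning_rate * bit
                for p, bit in zip(prob_vector, best_individual)]

    print(pbil_update([0.5, 0.5], [1, 0]))  # -> [0.65, 0.35]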
|
/dports/misc/py-gluoncv/gluon-cv-0.9.0/gluoncv/utils/
lr_scheduler.py
    24:  self.learning_rate = 0
    39:  return self.learning_rate
    50:  self.learning_rate = lr.learning_rate
    120: return self.learning_rate
    145: self.learning_rate = self.base_lr * factor
    147: self.learning_rate = self.target_lr + (self.base_lr - self.target_lr) * factor
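Line 147 is the general pattern: compute a factor in [0, 1] from training progress, then interpolate between base_lr and target_lr. A sketch of the cosine case (gluoncv supports several decay modes; this shows one):

    import math

    def cosine_lr(iteration, max_iterations, base_lr, target_lr=0.0):
        factor = (1 + math.cos(math.pi * iteration / max_iterations)) / 2
        # line 147 above: interpolate between the two rates.
        return target_lr + (base_lr - target_lr) * factor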
|
/dports/misc/mxnet/incubator-mxnet-1.9.0/example/speech_recognition/
train.py
    42:  def __init__(self, learning_rate=0.001):   (argument)
    44:  self.learning_rate = learning_rate
    47:  return self.learning_rate
    71:  learning_rate = args.config.getfloat('train', 'learning_rate')
    115: lr_scheduler = SimpleLRScheduler(learning_rate=learning_rate)
    184: lr_scheduler.learning_rate=learning_rate/learning_rate_annealing
|
/dports/misc/py-mxnet/incubator-mxnet-1.9.0/example/speech_recognition/
train.py
    42:  def __init__(self, learning_rate=0.001):   (argument)
    44:  self.learning_rate = learning_rate
    47:  return self.learning_rate
    71:  learning_rate = args.config.getfloat('train', 'learning_rate')
    115: lr_scheduler = SimpleLRScheduler(learning_rate=learning_rate)
    184: lr_scheduler.learning_rate=learning_rate/learning_rate_annealing
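Both ports show the same trick: a scheduler that simply returns a mutable attribute, which the training loop overwrites between epochs (line 184 divides it by an annealing factor). A standalone sketch of the idea (the real class subclasses mxnet's LRScheduler):

    class SimpleLRScheduler:
        def __init__(self, learning_rate=0.001):
            self.learning_rate = learning_rate

        def __call__(self, num_update):
            # line 47 above: ignore num_update, return the current rate.
            return self.learning_rate

    sched = SimpleLRScheduler(learning_rate=0.01)
    sched.learning_rate /= 1.1   # line 184: anneal from the driver loop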
|
/dports/misc/mxnet/incubator-mxnet-1.9.0/example/ssd/train/
train_net.py
    48:  def get_lr_scheduler(learning_rate, lr_refactor_step, lr_refactor_ratio,   (argument)
    75:  return (learning_rate, None)
    77:  lr = learning_rate
    82:  if lr != learning_rate:
    92:  prefix, ctx, begin_epoch, end_epoch, frequent, learning_rate,   (argument)
    245: learning_rate, lr_scheduler = get_lr_scheduler(learning_rate, lr_refactor_step,
    247: optimizer_params={'learning_rate':learning_rate,
|
/dports/misc/py-mxnet/incubator-mxnet-1.9.0/example/ssd/train/
train_net.py
    48:  def get_lr_scheduler(learning_rate, lr_refactor_step, lr_refactor_ratio,   (argument)
    75:  return (learning_rate, None)
    77:  lr = learning_rate
    82:  if lr != learning_rate:
    92:  prefix, ctx, begin_epoch, end_epoch, frequent, learning_rate,   (argument)
    245: learning_rate, lr_scheduler = get_lr_scheduler(learning_rate, lr_refactor_step,
    247: optimizer_params={'learning_rate':learning_rate,
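get_lr_scheduler returns both a possibly-adjusted starting rate and a step scheduler; lines 77-82 suggest it pre-applies the decay for refactor steps that fall before the resume epoch. A hedged sketch of that resume adjustment (my reading of the hits, not the verbatim function):

    def effective_start_lr(learning_rate, refactor_steps, ratio, begin_epoch):
        lr = learning_rate                      # line 77
        for step in refactor_steps:
            if begin_epoch >= step:
                lr *= ratio                     # decay already "spent"
        return lr

    print(effective_start_lr(0.004, [80, 160], 0.1, begin_epoch=120))
    # -> 0.0004 (up to float rounding)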
|
/dports/math/libxsmm/libxsmm-1.16.3/src/template/
libxsmm_dnn_optimizer_sgd_st_generic.tpl.c
    44: __m512 vlr = _mm512_set1_ps( handle->desc.learning_rate );
    54: master[i] = master[i] - (handle->desc.learning_rate*t1.f);
    66: master[i] = master[i] - (handle->desc.learning_rate*t1.f);
    75: __m512 vlr = _mm512_set1_ps( handle->desc.learning_rate );
    80: filter[i] = filter[i] - (handle->desc.learning_rate*dfilter[i]);
    85: filter[i] = filter[i] - (handle->desc.learning_rate*dfilter[i]);
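_mm512_set1_ps broadcasts the scalar rate into an AVX-512 register so the SGD update runs 16 floats at a time; the scalar loops at lines 80/85 do the same arithmetic for the remainder. The whole kernel reduces to one elementwise update, shown here in NumPy:

    import numpy as np

    def sgd_update(filter_buf, dfilter, learning_rate):
        # lines 80/85 above: filter[i] -= learning_rate * dfilter[i]
        return filter_buf - learning_rate * dfilter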
|
/dports/misc/orange3/orange3-3.29.1/Orange/classification/
xgb.py
    30:  learning_rate=None,   (argument)
    58:  learning_rate=learning_rate,
    93:  learning_rate=None,   (argument)
    121: learning_rate=learning_rate,
|
/dports/misc/orange3/orange3-3.29.1/Orange/regression/
xgb.py
    29:  learning_rate=None,   (argument)
    56:  super().__init__(max_depth=max_depth, learning_rate=learning_rate,
    81:  learning_rate=None,   (argument)
    108: super().__init__(max_depth=max_depth, learning_rate=learning_rate,
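In both Orange wrappers (classification and regression) learning_rate is only forwarded to the underlying xgboost estimators, where it is the boosting shrinkage (eta), not an SGD step size. Direct usage of the wrapped library (real xgboost API; hyperparameter values are illustrative):

    from xgboost import XGBRegressor

    # Each tree's contribution is scaled by learning_rate before
    # being added to the ensemble.
    model = XGBRegressor(max_depth=6, learning_rate=0.1, n_estimators=100)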
|