/dports/science/py-chainer/chainer-7.8.0/chainer/

_backprop_utils.py
     94  func, target_input_indexes, grad_outputs, grad_inputs, is_debug):  argument
    115  assert isinstance(target_input_indexes, tuple)
    116  assert target_input_indexes == tuple(sorted(target_input_indexes))
    123  for i in target_input_indexes
    129  target_input_indexes, grad_outputs, grad_inputs_tuple)
    138  target_input_indexes, grad_outputs)
    153  gxs = tuple([gxs[i] for i in target_input_indexes])
    154  elif len_gxs != len(target_input_indexes):
    156  if len(func.inputs) == len(target_input_indexes):
    162  % (len_gxs, len(func.inputs), len(target_input_indexes)))
    [all …]
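The _backprop_utils.py hits are the normalization step that runs right after a FunctionNode's backward(): target_input_indexes must be a sorted tuple, and if backward() returned one gradient per function input instead of one per targeted input, only the targeted entries are kept. A minimal sketch of that logic, using hypothetical names (normalize_grads, n_inputs) rather than the library's internal ones:

    def normalize_grads(gxs, target_input_indexes, n_inputs):
        # target_input_indexes is expected to be a sorted tuple, as the
        # asserts at lines 115-116 above check.
        assert isinstance(target_input_indexes, tuple)
        assert target_input_indexes == tuple(sorted(target_input_indexes))

        if len(gxs) == n_inputs:
            # backward() returned a gradient for every input of the
            # function; keep only the requested ones (line 153 above).
            return tuple([gxs[i] for i in target_input_indexes])
        if len(gxs) != len(target_input_indexes):
            raise ValueError(
                'backward() returned %d gradients, expected %d (all inputs) '
                'or %d (targeted inputs)'
                % (len(gxs), n_inputs, len(target_input_indexes)))
        return gxs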
function_node.py
    694  def backward(self, target_input_indexes, grad_outputs):  argument
    738  return (None,) * len(target_input_indexes)
    740  def backward_accumulate(self, target_input_indexes, grad_outputs,  argument
    788  assert isinstance(target_input_indexes, tuple)
    799  def _backward_chainerx(self, target_input_indexes, grad_outputs,  argument
    804  assert len(target_input_indexes) > 0
    833  tuple(target_input_indexes),
    845  def _backward_target_inputs(self, target_input_indexes, grad_outputs):  argument
    851  gxs = self.backward(target_input_indexes, grad_outputs)
    855  gxs = tuple([gxs[i] for i in target_input_indexes])
    [all …]
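function_node.py is where the interface itself is defined: FunctionNode.backward(target_input_indexes, grad_outputs) returns one gradient per targeted input, and its default implementation returns (None,) * len(target_input_indexes). A small, hedged example of a user-defined node following that contract; MulConstant is an illustrative class, not something from the listing above:

    import numpy as np
    import chainer
    from chainer import FunctionNode

    class MulConstant(FunctionNode):
        # y = c * x, kept minimal to show the backward() signature.
        def __init__(self, c):
            self.c = c

        def forward(self, inputs):
            x, = inputs
            return self.c * x,

        def backward(self, target_input_indexes, grad_outputs):
            # One gradient per targeted input; the only input here is x.
            gy, = grad_outputs
            return self.c * gy,

    x = chainer.Variable(np.array([1.0, 2.0], dtype=np.float32))
    y, = MulConstant(3.0).apply((x,))
    y.grad = np.ones_like(y.array)
    y.backward()
    print(x.grad)  # [3. 3.]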
_backprop.py
    189  target_input_indexes = tuple([
    197  if not target_input_indexes:
    216  target_inputs = [inputs[i] for i in target_input_indexes]
    226  func, target_input_indexes, out_grad, in_grad, is_debug)
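The _backprop.py matches build target_input_indexes in the first place: the tuple of input positions that actually need a gradient, with the whole call skipped when it is empty. A simplified sketch of that selection; select_target_input_indexes is a hypothetical name, and the real code also consults the set of gradients being accumulated:

    def select_target_input_indexes(inputs):
        # Keep only the positions of inputs that require a gradient.
        return tuple([
            i for i, x in enumerate(inputs)
            if getattr(x, 'requires_grad', False)
        ])

    # An empty tuple means no gradient is needed from this function,
    # so its backward pass can be skipped entirely (line 197 above).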
function.py
    181  def backward(self, target_input_indexes, grad_outputs):  argument
    221  for i in target_input_indexes:
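function.py carries the old-style chainer.Function path, where backward() historically produced one gradient per input; the loop over target_input_indexes narrows that down to the requested positions. A hedged sketch of the idea, using a hypothetical adapt_old_style_backward helper rather than the actual adapter code:

    def adapt_old_style_backward(old_backward, in_data, grad_out_data,
                                 target_input_indexes):
        # Old-style backward returns a gradient for every input ...
        gxs = old_backward(in_data, grad_out_data)
        # ... while the new-style interface only wants the targeted ones.
        return tuple([gxs[i] for i in target_input_indexes])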
/dports/science/py-chainer/chainer-7.8.0/chainerx_cc/chainerx/python/

chainer_interop.cc
    163  std::vector<size_t> target_input_indexes;  in InitChainerxChainerInterop()  local
    164  target_input_indexes.reserve(bctx.input_count());  in InitChainerxChainerInterop()
    168  target_input_indexes.emplace_back(j);  in InitChainerxChainerInterop()
    172  … CHAINERX_ASSERT(IsUniqueAndIncreasingIndexes(target_input_indexes, bctx.input_count()));  in InitChainerxChainerInterop()
    212  chainer_target_input_indexes.reserve(target_input_indexes.size());  in InitChainerxChainerInterop()
    213  for (size_t j : target_input_indexes) {  in InitChainerxChainerInterop()
    223  CHAINERX_ASSERT(chainer_grad_inputs.size() == target_input_indexes.size());  in InitChainerxChainerInterop()
    226  for (size_t k = 0; k < target_input_indexes.size(); ++k) {  in InitChainerxChainerInterop()
    227  size_t j = gsl::at(target_input_indexes, k);  in InitChainerxChainerInterop()
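On the ChainerX side, chainer_interop.cc gathers the indexes of backward-context inputs that require gradients, asserts that the collection is unique and strictly increasing, and later scatters the gradients returned by Chainer back to their original positions. The same bookkeeping in simplified Python, with illustrative names (scatter_grads, input_count):

    def scatter_grads(grad_inputs, target_input_indexes, input_count):
        # Mirrors the CHAINERX_ASSERT checks: unique, strictly increasing,
        # and one returned gradient per targeted index.
        assert list(target_input_indexes) == sorted(set(target_input_indexes))
        assert len(grad_inputs) == len(target_input_indexes)

        full = [None] * input_count
        for k, j in enumerate(target_input_indexes):
            full[j] = grad_inputs[k]   # put grad k back at input position j
        return full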
/dports/science/py-chainer/chainer-7.8.0/chainer/distributions/

laplace.py
     23  def backward(self, target_input_indexes, grad_outputs):  argument
     38  def backward(self, target_input_indexes, grad_outputs):  argument
multivariate_normal.py
     75  def backward(self, target_input_indexes, grad_outputs):  argument
/dports/science/py-chainer/chainer-7.8.0/onnx_chainer/

replace_func.py
     47  def backward(self, target_input_indexes, grad_outputs):  argument
     55  return tuple(grad_input if i in target_input_indexes else None
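The onnx_chainer wrapper in replace_func.py shows the usual way a backward() honours the interface when it computes gradients for every input anyway: return the gradient at targeted positions and None for the rest. A one-function sketch of that pattern (mask_untargeted is an illustrative name):

    def mask_untargeted(grad_inputs, target_input_indexes):
        # None for inputs whose gradient was not requested.
        return tuple(gx if i in target_input_indexes else None
                     for i, gx in enumerate(grad_inputs))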
/dports/science/py-chainer/chainer-7.8.0/chainermn/functions/

pseudo_connect.py
     15  def backward(self, target_input_indexes, grad_outputs):  argument
/dports/science/py-chainer/chainer-7.8.0/chainer/functions/array/

as_strided.py
    331  def backward(self, target_input_indexes, grad_outputs):  argument
/dports/science/py-chainer/chainer-7.8.0/chainer/graph_optimizations/

static_graph.py
    886  def backward(self, target_input_indexes, grad_outputs):  argument