Searched refs:reward (Results 1 – 25 of 1781), sorted by relevance


/dports/finance/odoo/odoo-19d77c2a03335eb95a686bd69a1b56b38e87d609/odoo/addons/web/static/src/scss/
rainbow.scss
4 $reward-size: 400px;
5 $reward-size-mobile: 300px;
6 $reward-text-color: #727880;
7 $reward-base-time: 1.4s;
22 @include size($reward-size, $reward-size);
24 @include size($reward-size-mobile, $reward-size-mobile);
71 animation: reward-float $reward-base-time ease-in-out $reward-base-time infinite alternate;
77 @include size($reward-size * 0.75, $reward-size / 2);
126 …animation: reward-float $reward-base-time ease-in-out $reward-base-time infinite alternate-reverse;
159 … animation: reward-float $reward-base-time ease-in-out $reward-base-time infinite alternate;
[all …]
/dports/www/tikiwiki/tiki-21.2/lib/goal/
rewardlib.php
26 'applyUser' => function ($user, $reward) {
27 $this->giveBadge($reward, 'user', $user);
29 'applyGroup' => function ($group, $reward) {
40 'applyUser' => function ($user, $reward) {
65 $lib->addCredits($userId, $reward['creditType'], $reward['creditQuantity']);
101 foreach ($rewards as $reward) {
102 $type = $reward['rewardType'];
111 foreach ($rewards as $reward) {
112 $type = $reward['rewardType'];
126 if ($reward['trackerItemBadge']) {
[all …]
/dports/finance/odoo/odoo-19d77c2a03335eb95a686bd69a1b56b38e87d609/odoo/addons/coupon/models/
coupon_reward.py
56 …if self.filtered(lambda reward: reward.discount_type == 'percentage' and (reward.discount_percenta…
64 for reward in self:
66 if reward.reward_type == 'product':
68 elif reward.reward_type == 'discount':
69 if reward.discount_type == 'percentage':
70 reward_percentage = str(reward.discount_percentage)
71 if reward.discount_apply_on == 'on_order':
82 elif reward.discount_apply_on == 'cheapest_product':
84 elif reward.discount_type == 'fixed_amount':
88 amount=reward.discount_fixed_amount,
[all …]
/dports/games/lordsawar/lordsawar-0.3.2/src/editor/
reward-editor-dialog.cpp
47 reward = NULL; in RewardEditorDialog()
115 if (reward) in RewardEditorDialog()
171 reward = new Reward_Item(item); in run()
173 reward = new Reward_Allies(ally, in run()
176 reward = new Reward_Map in run()
183 reward = new Reward_Ruin(hidden_ruin); in run()
186 if (reward) in run()
188 delete reward; in run()
189 reward = NULL; in run()
193 if (reward) in run()
[all …]
rewardlist-dialog.cpp
110 (*i)[rewards_columns.reward] = reward; in addReward()
119 d_reward = (*i)[rewards_columns.reward]; in on_reward_selected()
131 Reward *reward = d.get_reward(); in on_add_clicked() local
134 (*i)[rewards_columns.reward] = reward; in on_add_clicked()
150 Reward *a = row[rewards_columns.reward]; in on_remove_clicked()
165 Reward *reward = row[rewards_columns.reward]; in on_edit_clicked() local
175 delete reward; in on_edit_clicked()
176 reward = d.get_reward(); in on_edit_clicked()
177 *i = reward; in on_edit_clicked()
179 (*iterrow)[rewards_columns.reward] = reward; in on_edit_clicked()
[all …]
/dports/science/pybrain/pybrain-0.3.3/pybrain/rl/environments/cartpole/
balancetask.py
62 reward = 0
64 reward = 0
68 reward = -1
69 return reward
81 reward = 0
86 return reward
96 reward = 0
101 return reward
146 return reward
157 return reward
[all …]
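
A note on the balancetask.py hits above: the task hands out a sparse penalty, returning 0 while the pole stays balanced and -1 once it falls. A minimal hedged sketch of that reward shape (thresholds here are illustrative, not pybrain's exact bounds):

    # Sparse failure-penalty reward for a pole-balancing task (illustrative thresholds).
    def balance_reward(pole_angle, cart_position, max_angle=0.7, max_position=2.4):
        # -1 once the pole tips too far or the cart runs off the track, 0 otherwise
        if abs(pole_angle) > max_angle or abs(cart_position) > max_position:
            return -1
        return 0
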
/dports/www/opencart/opencart-3.0.3.8/upload/catalog/view/theme/default/template/extension/total/
reward.twig
3 …<h4 class="panel-title"><a href="#collapse-reward" class="accordion-toggle" data-toggle="collapse"…
5 <div id="collapse-reward" class="panel-collapse collapse">
7 <label class="col-sm-2 control-label" for="input-reward">{{ entry_reward }}</label>
9 …<input type="text" name="reward" value="{{ reward }}" placeholder="{{ entry_reward }}" id="input-r…
11 …<input type="submit" value="{{ button_reward }}" id="button-reward" data-loading-text="{{ text_loa…
14 $('#button-reward').on('click', function() {
16 url: 'index.php?route=extension/total/reward/reward',
18 data: 'reward=' + encodeURIComponent($('input[name=\'reward\']').val()),
21 $('#button-reward').button('loading');
24 $('#button-reward').button('reset');
/dports/games/lordsawar/lordsawar-0.3.2/src/gui/
ruin-rewarded-dialog.cpp
36 RuinRewardedDialog::RuinRewardedDialog(Gtk::Window &parent, Reward_Ruin *reward) in RuinRewardedDialog() argument
41 ruinmap = new RuinMap(reward->getRuin(), in RuinRewardedDialog()
51 d_reward = reward; in RuinRewardedDialog()
62 Reward *reward = d_reward->getRuin()->getReward(); in run() local
63 if (reward->getType() == Reward::ALLIES) in run()
65 else if (reward->getType() == Reward::ITEM) in run()
67 Item *item = static_cast<Reward_Item*>(reward)->getItem(); in run()
70 else if (reward->getType() == Reward::MAP) in run()
72 else if (reward->getType() == Reward::RUIN) in run()
74 else if (reward->getType() == Reward::GOLD) in run()
quest-completed-dialog.cpp
37 reward = r; in QuestCompletedDialog()
63 if (reward->getType() == Reward::GOLD) in QuestCompletedDialog()
65 guint32 gold = dynamic_cast<Reward_Gold*>(reward)->getGold(); in QuestCompletedDialog()
70 else if (reward->getType() == Reward::ALLIES) in QuestCompletedDialog()
72 guint32 num = dynamic_cast<Reward_Allies*>(reward)->getNoOfAllies(); in QuestCompletedDialog()
77 else if (reward->getType() == Reward::ITEM) in QuestCompletedDialog()
79 Item *item = dynamic_cast<Reward_Item*>(reward)->getItem(); in QuestCompletedDialog()
83 else if (reward->getType() == Reward::RUIN) in QuestCompletedDialog()
85 Ruin *ruin = dynamic_cast<Reward_Ruin*>(reward)->getRuin(); in QuestCompletedDialog()
/dports/math/mlpack/mlpack-3.4.2/src/mlpack/tests/
rl_components_test.cpp
44 double reward, minReward = 0.0; variable
50 reward = task.Sample(state, action, state);
51 minReward = std::min(reward, minReward);
78 double reward = task.Sample(state, action); variable
80 REQUIRE(reward <= 100.0);
105 double reward = task.Sample(state, action); variable
107 REQUIRE(reward == -1.0);
134 REQUIRE(reward == -1.0);
161 REQUIRE(reward == 1.0);
187 REQUIRE(reward == 1.0);
[all …]
/dports/net-p2p/go-ethereum/go-ethereum-1.10.14/eth/gasprice/
feehistory.go
61 reward []*big.Int member
70 reward *big.Int member
80 return s[i].reward.Cmp(s[j].reward) < 0
109 for i := range bf.results.reward {
110 bf.results.reward[i] = new(big.Int)
117 reward, _ := tx.EffectiveGasTip(bf.block.BaseFee())
118 sorter[i] = txGasAndReward{gasUsed: bf.receipts[i].GasUsed, reward: reward}
131 bf.results.reward[i] = sorter[txIndex].reward
280 reward = make([][]*big.Int, blocks)
304 reward = reward[:firstMissing]
[all …]
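
The feehistory.go hits above sort each block's transactions by effective tip and then fill the requested reward percentiles from that sorted list, weighting by gas used. A rough Python sketch of that percentile selection (function and variable names are illustrative, not go-ethereum's API):

    # Gas-weighted tip percentiles, sketched after the sorting seen in feehistory.go.
    def percentile_rewards(txs, block_gas_used, percentiles):
        # txs: list of (gas_used, effective_tip) pairs; percentiles: e.g. [25, 50, 75]
        if not txs:
            return [0] * len(percentiles)
        ordered = sorted(txs, key=lambda t: t[1])        # ascending by effective tip
        rewards, cumulative_gas, idx = [], 0, 0
        for p in percentiles:                            # percentiles assumed ascending
            threshold = block_gas_used * p / 100.0
            while idx < len(ordered) - 1 and cumulative_gas + ordered[idx][0] < threshold:
                cumulative_gas += ordered[idx][0]
                idx += 1
            rewards.append(ordered[idx][1])              # tip of the tx spanning the threshold
        return rewards
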
/dports/devel/py-bullet3/bullet3-3.21/examples/pybullet/gym/pybullet_envs/minitaur/agents/ppo/
utility.py
77 def discounted_return(reward, length, discount): argument
79 timestep = tf.range(reward.shape[1].value)
91 timestep = tf.range(reward.shape[1].value)
93 return_ = tf.zeros_like(reward)
95 return_ += reward
96 reward = discount * tf.concat([reward[:, 1:], tf.zeros_like(reward[:, -1:])], 1)
102 def lambda_return(reward, value, length, discount, lambda_): argument
104 timestep = tf.range(reward.shape[1].value)
117 def lambda_advantage(reward, value, length, discount): argument
119 timestep = tf.range(reward.shape[1].value)
[all …]
/dports/devel/py-bullet3/bullet3-3.21/examples/pybullet/gym/pybullet_envs/agents/ppo/
utility.py
70 def discounted_return(reward, length, discount): argument
72 timestep = tf.range(reward.shape[1].value)
84 timestep = tf.range(reward.shape[1].value)
86 return_ = tf.zeros_like(reward)
88 return_ += reward
89 reward = discount * tf.concat([reward[:, 1:], tf.zeros_like(reward[:, -1:])], 1)
95 def lambda_return(reward, value, length, discount, lambda_): argument
97 timestep = tf.range(reward.shape[1].value)
110 def lambda_advantage(reward, value, length, discount): argument
112 timestep = tf.range(reward.shape[1].value)
[all …]
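
Both copies of utility.py above build discounted returns by scanning the reward tensor backwards in TensorFlow. A minimal plain-NumPy sketch of the same recursion, return_t = reward_t + discount * return_{t+1} (names are illustrative, not the pybullet agents API):

    import numpy as np

    # Backward recursion for the discounted return of a single episode.
    def discounted_return(reward, discount):
        return_ = np.zeros(len(reward))
        running = 0.0
        for t in reversed(range(len(reward))):
            running = reward[t] + discount * running
            return_[t] = running
        return return_

    # Three steps of reward 1.0 with discount 0.9 -> [2.71, 1.9, 1.0]
    print(discounted_return(np.array([1.0, 1.0, 1.0]), 0.9))
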
/dports/games/stendhal/stendhal-1.35/src/games/stendhal/server/maps/quests/
JailedBarbarian.java
119 reward.add(new DropItemAction("scythe")); in step2()
120 reward.add(new IncreaseXPAction(1000)); in step2()
122 reward.add(new IncreaseKarmaAction(10)); in step2()
129 new MultipleActions(reward)); in step2()
190 reward.add(new DropItemAction("egg")); in step5()
191 reward.add(new IncreaseXPAction(1000)); in step5()
193 reward.add(new IncreaseKarmaAction(10)); in step5()
200 new MultipleActions(reward)); in step5()
267 reward.add(new IncreaseXPAction(50000)); in step8()
270 reward.add(new IncreaseKarmaAction(15)); in step8()
[all …]
HungryJoshua.java
200 reward.add(new DropItemAction("sandwich", FOOD_AMOUNT)); in step_2()
201 reward.add(new IncreaseXPAction(150)); in step_2()
202 reward.add(new SetQuestAction(QUEST_SLOT, "joshua")); in step_2()
203 reward.add(new IncreaseKarmaAction(15)); in step_2()
204 reward.add(new InflictStatusOnNPCAction("sandwich")); in step_2()
211 new MultipleActions(reward)); in step_2()
246 reward.add(new IncreaseXPAction(50)); in step_3()
247 reward.add(new SetQuestAction(QUEST_SLOT, "done")); in step_3()
250 reward.add(new EquipItemAction("keyring", 1, true)); in step_3()
252 reward.add(new EnableFeatureAction("keyring")); in step_3()
[all …]
GoodiesForRudolph.java
193 reward.add(new DropItemAction("reindeer moss", 5)); in prepareBringingStep()
194 reward.add(new DropItemAction("carrot", 10)); in prepareBringingStep()
195 reward.add(new DropItemAction("apple", 10)); in prepareBringingStep()
196 reward.add(new EquipItemAction("money", 50)); in prepareBringingStep()
197 reward.add(new EquipItemAction("snowglobe")); in prepareBringingStep()
198 reward.add(new IncreaseXPAction(100)); in prepareBringingStep()
199 reward.add(new SetQuestAction(QUEST_SLOT, "done")); in prepareBringingStep()
200 reward.add(new IncreaseKarmaAction(60)); in prepareBringingStep()
201 reward.add(new InflictStatusOnNPCAction("apple")); in prepareBringingStep()
203 reward.add(new SetQuestAction(QUEST_SLOT, 0, "done")); in prepareBringingStep()
[all …]
/dports/science/pybrain/pybrain-0.3.3/pybrain/rl/environments/flexcube/
tasks.py
30 self.reward = [0.0]
49 self.reward[0] = self.rawReward - self.getPain()
50 return self.reward[0]
86 self.reward[0] = 0.0
121 return self.reward[0]
148 else: self.reward[0] = -self.getPain()
150 return self.reward[0]
167 else: self.reward[0] = -self.getPain()
168 return self.reward[0]
197 self.reward[0] = -self.getPain()
[all …]
/dports/math/mlpack/mlpack-3.4.2/src/mlpack/methods/reinforcement_learning/replay/
random_replay.hpp
57 double reward; member
106 double reward, in Store() argument
111 nStepBuffer.push_back({state, action, reward, nextState, isEnd}); in Store()
125 GetNStepInfo(reward, nextState, isEnd, discount); in Store()
132 rewards(position) = reward; in Store()
151 void GetNStepInfo(double& reward, in GetNStepInfo() argument
156 reward = nStepBuffer.back().reward; in GetNStepInfo()
164 reward = nStepBuffer[i].reward + discount * reward * (1 - iE); in GetNStepInfo()
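
The GetNStepInfo lines above fold the transitions buffered for n-step replay into a single reward with reward = r_i + discount * reward * (1 - isEnd), walking from the newest transition back to the oldest and truncating at episode ends. A small Python sketch of that fold (field names are hypothetical, not mlpack's):

    # Fold an n-step buffer (oldest first) into one reward, next state, and end flag.
    def n_step_info(buffer, discount):
        reward = buffer[-1]['reward']
        next_state, is_end = buffer[-1]['next_state'], buffer[-1]['is_end']
        for step in reversed(buffer[:-1]):
            reward = step['reward'] + discount * reward * (1 - step['is_end'])
            if step['is_end']:                 # episode ended mid-buffer: bootstrap from here
                next_state, is_end = step['next_state'], step['is_end']
        return reward, next_state, is_end
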
/dports/science/py-pyaixi/pyaixi-1.0.4.post1/pyaixi/environments/
kuhn_poker.py
114 self.reward = 0
148 self.reward = rPassLoss
150 return (self.observation, self.reward)
164 self.reward = rPassWin
166 return (self.observation, self.reward)
175 self.reward = rBetWin if self.env_action == aBet else rPassWin
177 self.reward = rBetLoss if self.action == aBet else rPassLoss
181 return (self.observation, self.reward)
196 agent_wins = (self.reward == rPassWin or self.reward == rBetWin)
198 ", reward = %d (%d)" % (self.reward, self.reward - 2) + os.linesep
rock_paper_scissors.py
87 self.reward = 0
103 if (self.observation == aRock) and (self.reward == rLose):
112 self.reward = rDraw
115 self.reward = rWin if self.observation == oScissors else rLose
118 self.reward = rWin if self.observation == oPaper else rLose
121 self.reward = rWin if self.observation == oRock else rLose
125 return (self.observation, self.reward)
138 "Agent " + reward_text[self.reward]
extended_tiger.py
142 self.reward = 0
160 self.reward = rInvalid
166 self.reward = rListen
170 self.reward = rTiger if self.tiger == oLeft else rGold
175 self.reward = rTiger if self.tiger == oRight else rGold
179 self.reward = rStand
183 return (self.observation, self.reward)
209 reward_text[self.reward],
210 (self.reward - 100),
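
The kuhn_poker.py, rock_paper_scissors.py, and extended_tiger.py hits above all follow the same contract: an action updates the environment's hidden state and the call returns an (observation, reward) pair. A hedged toy sketch of that interface (a made-up environment, not part of pyaixi):

    import random

    # Toy environment in the same (observation, reward) style as the pyaixi examples.
    class CoinFlipEnvironment:
        def __init__(self):
            self.observation = 0
            self.reward = 0

        def perform_action(self, action):
            flip = random.randint(0, 1)                # hidden outcome
            self.observation = flip                    # the agent observes the flip afterwards
            self.reward = 1 if action == flip else 0   # reward for guessing correctly
            return (self.observation, self.reward)

    env = CoinFlipEnvironment()
    for _ in range(3):
        print(env.perform_action(random.randint(0, 1)))
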
/dports/science/pybrain/pybrain-0.3.3/pybrain/rl/environments/ode/tasks/
johnnie.py
77 reward = self.env.getSensorByName('headPos')[1] / float(self.epiLen) #reward is hight of head
79 reward = clip(reward, -14.0, 4.0)
80 return reward
149 reward = self.env.getSensorByName('SpecificBodyPositionSensor8')[1] #reward is hight of head
150 if reward > self.maxHight:
151 self.maxHight = reward
153 reward = self.maxHight
155 reward = 0.0
156 return reward
179 reward = clip(reward, -14.0, 4.0)
[all …]
/dports/science/py-pyaixi/pyaixi-1.0.4.post1/pyaixi/agents/
mc_aixi_ctw.py
209 reward = self.decode_reward(reward_symbols)
213 return (observation, reward)
227 def encode_percept(self, observation, reward): argument
287 observation, reward = self.decode_percept(percept_symbols)
290 self.total_reward += reward
294 return (observation, reward)
395 def model_update_percept(self, observation, reward): argument
409 percept_symbols = self.encode_percept(observation, reward)
421 self.total_reward += reward
425 def percept_probability(self, observation, reward): argument
[all …]
/dports/net-im/chatterino2/chatterino2-2.3.4/src/providers/twitch/
ChannelPointReward.cpp
31 rapidjson::Value reward; in ChannelPointReward() local
33 rj::getSafeObject(redemption, "reward", reward))) in ChannelPointReward()
39 if (!(this->hasParsedSuccessfully = rj::getSafe(reward, "id", this->id))) in ChannelPointReward()
46 rj::getSafe(reward, "channel_id", this->channelId))) in ChannelPointReward()
53 rj::getSafe(reward, "title", this->title))) in ChannelPointReward()
60 rj::getSafe(reward, "cost", this->cost))) in ChannelPointReward()
67 reward, "is_user_input_required", this->isUserInputRequired))) in ChannelPointReward()
82 if (rj::getSafeObject(reward, "image", obj) && !obj.IsNull() && in ChannelPointReward()
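
The ChannelPointReward.cpp hits above read the redemption's nested "reward" object field by field with rapidjson safe getters, bailing out if any required field is missing. A hedged Python analogue of that defensive extraction (the keys follow the snippet; the helper itself is illustrative):

    import json

    # Pull the fields the snippet reads from a channel-point redemption payload.
    def parse_reward(payload):
        redemption = json.loads(payload)
        reward = redemption.get("reward")
        if not isinstance(reward, dict):
            return None                                # mirrors hasParsedSuccessfully = false
        required = ("id", "channel_id", "title", "cost", "is_user_input_required")
        if any(key not in reward for key in required):
            return None
        return {key: reward[key] for key in required}
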
/dports/math/mlpack/mlpack-3.4.2/src/mlpack/methods/ann/visitor/
reward_set_visitor_impl.hpp
22 inline RewardSetVisitor::RewardSetVisitor(const double reward) : reward(reward) in RewardSetVisitor() argument
44 layer->Reward() = reward; in LayerReward()
48 boost::apply_visitor(RewardSetVisitor(reward), in LayerReward()
61 boost::apply_visitor(RewardSetVisitor(reward), in LayerReward()
72 layer->Reward() = reward; in LayerReward()
