/dports/devel/py-distributed/distributed-2021.11.2/distributed/

active_memory_manager.py
  143: if not ts.who_has:
  145: who_has = {ws_snd.address for ws_snd in ts.who_has - pending_drop}
  146: assert who_has  # Never drop the last replica
  148: assert ws_rec not in ts.who_has
  149: repl_by_worker[ws_rec][ts] = who_has
  151: assert ws in ts.who_has
  246: candidates -= ts.who_has
  271: if len(ts.who_has) - len(pending_drop) < 2:
  274: candidates = ts.who_has.copy()
  276: candidates &= ts.who_has
  [all …]

utils_comm.py
  20: async def gather_from_workers(who_has, rpc, close=True, serializers=None, who=None):  (argument)
  40: original_who_has = who_has
  41: who_has = {k: set(v) for k, v in who_has.items()}
  45: while len(results) + len(all_bad_keys) < len(who_has):
  49: for key, addresses in who_has.items():
  159: who_has = {k: [w for w, _, _ in v] for k, v in groupby(1, L).items()}
  161: return (names, who_has, nbytes)

worker.py
  199: self.who_has = set()
  1363: who_has = {
  1377: who_has,
  1938: who_has=None,  (argument)
  2039: for w in ts.who_has:
  2088: assert ts.who_has
  2344: for w in ts.who_has:
  3018: who_has = {k: v for k, v in who_has.items() if v}
  3031: who_has = await retry_operation(self.scheduler.who_has, keys=deps)
  3033: return who_has
  [all …]

objects.py
  21: return get_template("who_has.html.j2").render(who_has=self)

scheduler.py
  1578: def who_has(self) -> "set[WorkerState]":  (member in TaskState)
  5801: self.update_data(who_has=who_has, nbytes=nbytes, client=client)
  5817: who_has = {}
  5823: who_has[key] = []
  6034: self.rpc(addr=worker_address).gather, who_has=who_has
  6043: return set(who_has)
  6050: return set(who_has)
  6053: keys_ok: Set = who_has.keys()
  6889: who_has: dict,
  6903: who_has = {
  [all …]

client.py
  1913: who_has = await retry_operation(self.scheduler.who_has, keys=keys)
  1915: who_has, rpc=self.rpc, close=False
  2047: who_has={key: [local_worker.address] for key in data},
  2067: _, who_has, nbytes = await scatter_to_workers(
  2072: who_has=who_has, nbytes=nbytes, client=self.id
  3231: def who_has(self, futures=None, **kwargs):  (member in Client)
  3264: return WhoHas(await self.scheduler.who_has(keys=keys, **kwargs))

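The client.py hits end at the public Client.who_has method, which wraps the scheduler's reply in the WhoHas object whose HTML repr is rendered from who_has.html.j2 (see the objects.py and widgets/templates hits). A minimal usage sketch, assuming a local cluster started in-process (addresses and key names below are illustrative):

    from distributed import Client

    client = Client()                      # assumption: spin up a local cluster in-process
    future = client.submit(sum, [1, 2, 3])
    future.result()                        # wait so the result is actually held on a worker

    # Map each key to the worker addresses currently holding a replica,
    # e.g. {'sum-<hash>': ('tcp://127.0.0.1:36789',)}
    print(client.who_has([future]))

    # With no argument, report locations for every key the scheduler knows about
    print(client.who_has())
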
/dports/devel/py-distributed/distributed-2021.11.2/distributed/widgets/templates/

who_has.html.j2
  8: {% for title, keys in who_has.items() %}

/dports/security/nmap/nmap-7.91/

tcpip.cc
  149: char who_has[INET_ADDRSTRLEN], tell[INET_ADDRSTRLEN];  (in traceArp(), local)
  173: inet_ntop(AF_INET, (void *)(frame + 24), who_has, sizeof(who_has));  (in traceArp())
  175: Snprintf(arpdesc, sizeof(arpdesc), "who-has %s tell %s", who_has, tell);  (in traceArp())
  177: inet_ntop(AF_INET, (void *)(frame + 14), who_has, sizeof(who_has));  (in traceArp())
  179: "reply %s is-at %02X:%02X:%02X:%02X:%02X:%02X", who_has,  (in traceArp())
  202: char who_has[INET6_ADDRSTRLEN], tgt_is[INET6_ADDRSTRLEN];  (in traceND(), local)
  237: inet_ntop(AF_INET6, &msg->nd.icmpv6_target, who_has, sizeof(who_has));  (in traceND())
  238: Snprintf(desc, sizeof(desc), "who has %s", who_has);  (in traceND())

/dports/devel/py-distributed/distributed-2021.11.2/docs/source/

journey.rst
  97: 'who_has': {'x': {(worker_host, port)},
  109: them from the workers listed in the ``who_has`` dictionary also in the message.
  110: For each key that it doesn't have it selects a valid worker from ``who_has`` at

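The journey.rst lines describe the gather step: a worker receives a who_has mapping alongside the task and, for each dependency it is missing, picks one of the listed holders at random to fetch from. A minimal sketch of that selection, not the actual worker internals (choose_sources and local_data are illustrative names):

    import random

    def choose_sources(who_has, local_data):
        """For each key not already held locally, pick one holding worker at random."""
        plan = {}
        for key, workers in who_has.items():
            if key in local_data:
                continue                          # already present, nothing to fetch
            plan[key] = random.choice(sorted(workers))
        return plan

    # The message shape shown at journey.rst line 97
    who_has = {"x": {("worker_host", 8789)}}
    print(choose_sources(who_has, local_data={}))  # {'x': ('worker_host', 8789)}
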
scheduling-state.rst
  173: :attr:`TaskState.who_has` …

active_memory_manager.rst
  215: for _ in range(len(self.manager.scheduler.workers) - len(ts.who_has)):

changelog.rst
  400: - Add HTML reprs for ``Client.who_has`` and ``Client.has_what`` (:pr:`4853`) `Jacob Tomlinson`_
  409: - Ensure busy workloads properly look up ``who_has`` (:pr:`4793`) `Florian Jetter`_

/dports/science/py-gpaw/gpaw-21.6.0/gpaw/wavefunctions/

base.py
  389: band_rank, myn = self.bd.who_has(n)
  440: band_rank, myn = self.bd.who_has(n - 1)
  450: band_rank, myn = self.bd.who_has(n)

pw.py
  949: band_rank, myn = self.bd.who_has(n)

/dports/science/py-gpaw/gpaw-21.6.0/gpaw/

kpt_descriptor.py
  545: rank, q = self.who_has(k)
  555: def who_has(self, k):  (member in KPointDescriptor)

band_descriptor.py
  133: def who_has(self, n):  (member in BandDescriptor)

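The GPAW descriptors expose a who_has(index) that answers a different question from dask's: which MPI rank owns a given global band or k-point index, and what the local index is on that rank. A sketch of that kind of lookup under an even block distribution (this illustrates the call pattern seen in the hits, not GPAW's exact implementation):

    def who_has(n, nbands, comm_size):
        """Map global index n to (owning rank, local index), assuming an even block split."""
        mynbands = nbands // comm_size       # assumption: nbands divides evenly over ranks
        return divmod(n, mynbands)           # (rank, local index)

    # Example: 8 bands over 2 ranks -> global band 5 is local band 1 on rank 1
    rank, myn = who_has(5, nbands=8, comm_size=2)
    print(rank, myn)                         # 1 1
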
/dports/science/py-gpaw/gpaw-21.6.0/gpaw/analyse/

simple_stm.py
  59: rank, q = kd.who_has(k)

/dports/devel/py-distributed/distributed-2021.11.2/distributed/diagnostics/

progress.py
  32: if not complete and ts.who_has:

/dports/science/py-gpaw/gpaw-21.6.0/gpaw/response/

kspair.py
  513: kptrank, q = wfs.kd.who_has(k)
  517: bandrank, myn = wfs.bd.who_has(n)

/dports/misc/py-xgboost/xgboost-1.5.1/python-package/xgboost/

dask.py
  413: who_has = await client.scheduler.who_has(keys=[part.key for part in parts])
  417: for key, workers in who_has.items():

/dports/misc/xgboost/xgboost-1.5.1/python-package/xgboost/

dask.py
  413: who_has = await client.scheduler.who_has(keys=[part.key for part in parts])
  417: for key, workers in who_has.items():

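Both xgboost ports ship the same dask.py, which queries the scheduler RPC directly and then iterates over the returned mapping to learn where each partition lives. A hedged sketch of that pattern, grouping partitions by the first worker that holds them (group_parts_by_worker and the part objects are illustrative; only client.scheduler.who_has comes from the listing):

    from collections import defaultdict

    async def group_parts_by_worker(client, parts):
        """Bucket dask partitions by the worker currently holding their key."""
        who_has = await client.scheduler.who_has(keys=[part.key for part in parts])
        key_to_part = {part.key: part for part in parts}
        by_worker = defaultdict(list)
        for key, workers in who_has.items():
            if not workers:
                continue                     # key not materialised on any worker yet
            by_worker[workers[0]].append(key_to_part[key])
        return dict(by_worker)

    # Typically driven from synchronous code via client.sync(group_parts_by_worker, client, parts)
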
/dports/sysutils/slurm-wlm/slurm-20.02.7/src/slurmd/slurmd/

req.c
  1153: char *who_has = NULL;  (in _check_job_credential(), local)
  1156: who_has = "Job";  (in _check_job_credential())
  1160: who_has = "Step";  (in _check_job_credential())
  1162: if (cpu_log && who_has) {  (in _check_job_credential())
  1164: host_index, j, who_has);  (in _check_job_credential())

/dports/devel/py-dask/dask-2021.11.2/docs/source/

futures.rst
  892: Client.who_has