
Searched refs:tbl (Results 1 – 25 of 329) sorted by relevance


/linux/fs/nfs/
nfs4session.c
45 p = &tbl->slots; in nfs4_shrink_slot_table()
53 tbl->max_slots--; in nfs4_shrink_slot_table()
111 slot->table = tbl; in nfs4_new_slot()
125 p = &tbl->slots; in nfs4_find_or_create_slot()
128 *p = nfs4_new_slot(tbl, tbl->max_slots, in nfs4_find_or_create_slot()
132 tbl->max_slots++; in nfs4_find_or_create_slot()
248 __func__, tbl->used_slots[0], tbl->highest_used_slotid, in nfs4_alloc_slot()
250 slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1); in nfs4_alloc_slot()
257 __func__, tbl->used_slots[0], tbl->highest_used_slotid, in nfs4_alloc_slot()
279 p = &tbl->slots; in nfs4_reset_slot_table()
[all …]
nfs4session.h
83 extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
85 extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
86 extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
88 extern int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
93 extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
94 bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
96 void nfs41_wake_slot_table(struct nfs4_slot_table *tbl);
100 return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state); in nfs4_slot_tbl_draining()
106 return !!test_bit(slotid, tbl->used_slots); in nfs4_test_locked_slot()
115 extern void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/
dr_table.c
163 ret = dr_table_init_nic(tbl->dmn, &tbl->rx); in dr_table_init_fdb()
167 ret = dr_table_init_nic(tbl->dmn, &tbl->tx); in dr_table_init_fdb()
189 tbl->rx.nic_dmn = &tbl->dmn->info.rx; in dr_table_init()
190 ret = dr_table_init_nic(tbl->dmn, &tbl->rx); in dr_table_init()
194 tbl->tx.nic_dmn = &tbl->dmn->info.tx; in dr_table_init()
195 ret = dr_table_init_nic(tbl->dmn, &tbl->tx); in dr_table_init()
199 tbl->rx.nic_dmn = &tbl->dmn->info.rx; in dr_table_init()
200 tbl->tx.nic_dmn = &tbl->dmn->info.tx; in dr_table_init()
258 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL); in mlx5dr_table_create()
259 if (!tbl) in mlx5dr_table_create()
[all …]
/linux/arch/powerpc/kernel/
iommu.c
381 tbl->it_ops->flush(tbl); in iommu_alloc()
451 tbl->it_ops->clear(tbl, entry, npages); in __iommu_free()
468 tbl->it_ops->flush(tbl); in iommu_free()
579 tbl->it_ops->flush(tbl); in ppc_iommu_map_sg()
643 tbl->it_ops->flush(tbl); in ppc_iommu_unmap_sg()
655 tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size); in iommu_table_clear()
665 tceval = tbl->it_ops->get(tbl, index + tbl->it_offset); in iommu_table_clear()
753 tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools; in iommu_init_table()
810 tbl->it_ops->free(tbl); in iommu_table_free()
1040 tbl->it_ops->flush(tbl); in iommu_flush_tce()
[all …]
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_doorbell.c
56 struct pvrdma_id_table *tbl = &dev->uar_table.tbl; in pvrdma_uar_table_init() local
61 tbl->last = 0; in pvrdma_uar_table_init()
62 tbl->top = 0; in pvrdma_uar_table_init()
78 struct pvrdma_id_table *tbl = &dev->uar_table.tbl; in pvrdma_uar_table_cleanup() local
89 tbl = &dev->uar_table.tbl; in pvrdma_uar_alloc()
92 obj = find_next_zero_bit(tbl->table, tbl->max, tbl->last); in pvrdma_uar_alloc()
94 tbl->top = (tbl->top + tbl->max) & tbl->mask; in pvrdma_uar_alloc()
95 obj = find_first_zero_bit(tbl->table, tbl->max); in pvrdma_uar_alloc()
117 struct pvrdma_id_table *tbl = &dev->uar_table.tbl; in pvrdma_uar_free() local
124 tbl->last = min(tbl->last, obj); in pvrdma_uar_free()
[all …]
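
The pvrdma UAR table above is a bitmap-based ID allocator: allocation scans for a free bit starting at the previous position with find_next_zero_bit(), wraps around with find_first_zero_bit(), and freeing rewinds tbl->last. Below is a minimal sketch of that pattern; the demo_* structure and names are hypothetical, and only the bitmap and spinlock helpers are real kernel API.

/*
 * Generic bitmap ID allocator in the style of the pvrdma UAR table
 * (hypothetical demo_* names; bitmap/spinlock helpers are real kernel API).
 */
#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_id_table {
	unsigned long *table;	/* one bit per ID */
	u32 max;		/* number of IDs */
	u32 last;		/* where the previous search ended */
	spinlock_t lock;
};

static int demo_id_table_init(struct demo_id_table *tbl, u32 max)
{
	tbl->table = bitmap_zalloc(max, GFP_KERNEL);
	if (!tbl->table)
		return -ENOMEM;
	tbl->max = max;
	tbl->last = 0;
	spin_lock_init(&tbl->lock);
	return 0;
}

static int demo_id_alloc(struct demo_id_table *tbl)
{
	u32 id;

	spin_lock(&tbl->lock);
	/* Resume scanning after the last allocation, then wrap around. */
	id = find_next_zero_bit(tbl->table, tbl->max, tbl->last);
	if (id >= tbl->max)
		id = find_first_zero_bit(tbl->table, tbl->max);
	if (id >= tbl->max) {
		spin_unlock(&tbl->lock);
		return -ENOSPC;
	}
	__set_bit(id, tbl->table);
	tbl->last = id;
	spin_unlock(&tbl->lock);
	return id;
}

static void demo_id_free(struct demo_id_table *tbl, u32 id)
{
	spin_lock(&tbl->lock);
	__clear_bit(id, tbl->table);
	tbl->last = min(tbl->last, id);
	spin_unlock(&tbl->lock);
}
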
/linux/include/linux/
rhashtable.h
289 return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : in rht_bucket()
296 return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) : in rht_bucket_var()
435 tbl, hash)
461 rht_ptr(rht_bucket(tbl, hash), tbl, hash), \
602 tbl = rht_dereference_rcu(ht->tbl, ht); in __rhashtable_lookup()
622 tbl = rht_dereference_rcu(tbl->future_tbl, ht); in __rhashtable_lookup()
724 tbl = rht_dereference_rcu(ht->tbl, ht); in __rhashtable_insert_fast()
1086 tbl = rht_dereference_rcu(ht->tbl, ht); in __rhashtable_remove_fast()
1095 (tbl = rht_dereference_rcu(tbl->future_tbl, ht))) in __rhashtable_remove_fast()
1221 tbl = rht_dereference_rcu(ht->tbl, ht); in rhashtable_replace_fast()
[all …]
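
These hits are the resizable hashtable core: rht_bucket()/rht_bucket_var() pick a bucket (following nested buckets when tbl->nest is set), and the lookup/insert/remove fast paths re-fetch tbl->future_tbl so they keep working while a resize is in flight. A minimal usage sketch of the public API follows, assuming the conventional embedded rhash_head layout; the demo_* names are illustrative only.

/* Minimal rhashtable usage sketch; the object embeds a struct rhash_head
 * and the params describe where the key and the head live inside it. */
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct demo_obj {
	u32 key;
	struct rhash_head node;	/* linkage used by the hashtable */
};

static const struct rhashtable_params demo_params = {
	.key_len     = sizeof(u32),
	.key_offset  = offsetof(struct demo_obj, key),
	.head_offset = offsetof(struct demo_obj, node),
	.automatic_shrinking = true,
};

static int demo(void)
{
	struct rhashtable ht;
	struct demo_obj *obj, *found;
	u32 key = 42;
	int err;

	err = rhashtable_init(&ht, &demo_params);
	if (err)
		return err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj) {
		rhashtable_destroy(&ht);
		return -ENOMEM;
	}
	obj->key = key;

	/* Insert; the table resizes itself via the deferred worker. */
	err = rhashtable_insert_fast(&ht, &obj->node, demo_params);

	/* Lookups are RCU-protected. */
	rcu_read_lock();
	found = rhashtable_lookup_fast(&ht, &key, demo_params);
	rcu_read_unlock();

	rhashtable_remove_fast(&ht, &obj->node, demo_params);
	kfree(obj);
	rhashtable_destroy(&ht);
	return err;
}
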
/linux/tools/perf/util/
syscalltbl.c
77 if (tbl->syscalls.entries == NULL) in syscalltbl__init_native()
96 struct syscalltbl *tbl = malloc(sizeof(*tbl)); in syscalltbl__new() local
97 if (tbl) { in syscalltbl__new()
99 free(tbl); in syscalltbl__new()
103 return tbl; in syscalltbl__new()
108 zfree(&tbl->syscalls.entries); in syscalltbl__delete()
109 free(tbl); in syscalltbl__delete()
153 struct syscalltbl *tbl = zalloc(sizeof(*tbl)); in syscalltbl__new() local
154 if (tbl) in syscalltbl__new()
156 return tbl; in syscalltbl__new()
[all …]
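
perf's syscalltbl maps syscall names to numbers; the two syscalltbl__new() hits come from the variants built with and without a native syscall table. A hedged usage sketch follows; the prototypes are assumed from tools/perf/util/syscalltbl.h and differ between perf versions.

/* Assumed perf syscalltbl usage; include path is relative to tools/perf/util. */
#include <stdio.h>
#include "syscalltbl.h"

int demo(void)
{
	struct syscalltbl *tbl = syscalltbl__new();
	int id;

	if (tbl == NULL)
		return -1;

	id = syscalltbl__id(tbl, "openat");	/* name -> syscall number */
	if (id >= 0)
		printf("openat = %d (%s)\n", id, syscalltbl__name(tbl, id));

	syscalltbl__delete(tbl);
	return 0;
}
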
/linux/arch/powerpc/platforms/powernv/
pci-ioda-tce.c
52 tbl->it_blocksize = 16; in pnv_pci_setup_iommu_table()
55 tbl->it_offset = dma_offset >> tbl->it_page_shift; in pnv_pci_setup_iommu_table()
56 tbl->it_index = 0; in pnv_pci_setup_iommu_table()
58 tbl->it_busno = 0; in pnv_pci_setup_iommu_table()
59 tbl->it_type = TCE_PCI; in pnv_pci_setup_iommu_table()
85 __be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base; in pnv_tce()
186 return pnv_tce(tbl, true, index - tbl->it_offset, alloc); in pnv_tce_useraddrptr()
208 __be64 *ptce = pnv_tce(tbl, false, index - tbl->it_offset, false); in pnv_tce_get()
243 tbl->it_level_size : tbl->it_size; in pnv_pci_ioda2_table_free_pages()
245 if (!tbl->it_size) in pnv_pci_ioda2_table_free_pages()
[all …]
/linux/lib/
rhashtable.c
96 unsigned int size = tbl->size >> tbl->nest; in nested_bucket_table_free()
159 size = sizeof(*tbl) + sizeof(tbl->buckets[0]); in nested_bucket_table_alloc()
163 if (!tbl) in nested_bucket_table_alloc()
222 tbl = rht_dereference_rcu(tbl->future_tbl, ht); in rhashtable_last_table()
423 tbl = rht_dereference(ht->tbl, ht); in rht_deferred_worker()
424 tbl = rhashtable_last_table(ht, tbl); in rht_deferred_worker()
797 struct bucket_table *tbl = iter->walker.tbl; in __rhashtable_walk_find_next() local
1079 RCU_INIT_POINTER(ht->tbl, tbl); in rhashtable_init_noprof()
1150 tbl = rht_dereference(ht->tbl, ht); in rhashtable_free_and_destroy()
1189 unsigned int size = tbl->size >> tbl->nest; in __rht_bucket_nested()
[all …]
/linux/drivers/net/wireless/intel/iwlwifi/dvm/
rs.c
549 if (is_siso(tbl->lq_type) && tbl->is_SGI) { in rate_n_flags_from_tbl()
580 tbl->is_dup = 0; in rs_get_tbl_info_from_mcs()
633 if (!tbl->ant_type || tbl->ant_type > ANT_ABC) in rs_toggle_antenna()
1523 tbl->action++; in rs_move_legacy_other()
1536 tbl->action++; in rs_move_legacy_other()
1706 tbl->action++; in rs_move_siso_to_other()
1873 tbl->action++; in rs_move_mimo2_to_other()
2047 tbl->action++; in rs_move_mimo3_to_other()
2452 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) in rs_rate_scale_perform()
2456 (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) { in rs_rate_scale_perform()
[all …]
rs.h
256 #define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A)) argument
257 #define is_siso(tbl) ((tbl) == LQ_SISO) argument
258 #define is_mimo2(tbl) ((tbl) == LQ_MIMO2) argument
259 #define is_mimo3(tbl) ((tbl) == LQ_MIMO3) argument
260 #define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl)) argument
261 #define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl)) argument
262 #define is_a_band(tbl) ((tbl) == LQ_A) argument
263 #define is_g_and(tbl) ((tbl) == LQ_G) argument
/linux/net/netfilter/ipvs/
ip_vs_lblc.c
236 tbl->dead = true; in ip_vs_lblc_flush()
278 tbl->rover = j; in ip_vs_lblc_full_check()
310 if (atomic_read(&tbl->entries) <= tbl->max_size) { in ip_vs_lblc_check_expire()
311 tbl->counter++; in ip_vs_lblc_check_expire()
315 goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3; in ip_vs_lblc_check_expire()
335 tbl->rover = j; in ip_vs_lblc_check_expire()
350 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); in ip_vs_lblc_init_svc()
351 if (tbl == NULL) in ip_vs_lblc_init_svc()
365 tbl->rover = 0; in ip_vs_lblc_init_svc()
368 tbl->svc = svc; in ip_vs_lblc_init_svc()
[all …]
ip_vs_lblcr.c
442 tbl->rover = j; in ip_vs_lblcr_full_check()
474 if (atomic_read(&tbl->entries) <= tbl->max_size) { in ip_vs_lblcr_check_expire()
475 tbl->counter++; in ip_vs_lblcr_check_expire()
479 goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3; in ip_vs_lblcr_check_expire()
499 tbl->rover = j; in ip_vs_lblcr_check_expire()
513 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL); in ip_vs_lblcr_init_svc()
514 if (tbl == NULL) in ip_vs_lblcr_init_svc()
528 tbl->rover = 0; in ip_vs_lblcr_init_svc()
531 tbl->svc = svc; in ip_vs_lblcr_init_svc()
689 if (!tbl->dead) in ip_vs_lblcr_schedule()
[all …]
/linux/arch/powerpc/include/asm/
iommu.h
43 int (*set)(struct iommu_table *tbl,
54 int (*xchg_no_kill)(struct iommu_table *tbl,
59 void (*tce_kill)(struct iommu_table *tbl,
65 void (*clear)(struct iommu_table *tbl,
69 void (*flush)(struct iommu_table *tbl);
70 void (*free)(struct iommu_table *tbl);
121 ((tbl)->it_ops->useraddrptr((tbl), (entry), false))
123 ((tbl)->it_ops->useraddrptr((tbl), (entry), true))
215 struct iommu_table *tbl,
297 (tbl)->it_offset, (tbl)->it_size, \
[all …]
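
On powerpc, struct iommu_table dispatches through a per-platform iommu_table_ops vtable; the iommu.c hits earlier (tbl->it_ops->flush(tbl), ->clear(), ->free()) are the call sites. Below is a skeleton limited to the two callbacks whose full prototypes are visible in the hits (flush and free); the demo_* names are hypothetical.

#include <asm/iommu.h>

static void demo_tce_flush(struct iommu_table *tbl)
{
	/* push any pending TCE updates out to the hardware */
}

static void demo_tce_free(struct iommu_table *tbl)
{
	/* release the table's backing pages */
}

static struct iommu_table_ops demo_iommu_ops = {
	.flush = demo_tce_flush,
	.free  = demo_tce_free,
};

/* Call sites then dispatch as in iommu.c: tbl->it_ops->flush(tbl); */
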
/linux/drivers/net/wireless/marvell/mwifiex/
11n_rxreorder.c
109 min((start_win - tbl->start_win), tbl->win_size) : in mwifiex_11n_dispatch_pkt_until_start_win()
170 tbl->rx_reorder_ptr[j] = tbl->rx_reorder_ptr[i + j]; in mwifiex_11n_scan_and_dispatch()
194 if (!tbl) in mwifiex_del_rx_reorder_entry()
217 kfree(tbl); in mwifiex_del_rx_reorder_entry()
236 if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) { in mwifiex_11n_get_rx_reorder_tbl()
238 return tbl; in mwifiex_11n_get_rx_reorder_tbl()
342 if (tbl) { in mwifiex_11n_create_rx_reorder_tbl()
558 if (!tbl) { in mwifiex_11n_rx_reorder_pkt()
680 if (!tbl) { in mwifiex_del_ba_tbl()
734 if (tbl) in mwifiex_ret_11n_addba_resp()
[all …]
/linux/drivers/net/wireless/intel/iwlegacy/
4965-rs.c
517 if (tbl->is_SGI) in il4965_rate_n_flags_from_tbl()
522 if (is_siso(tbl->lq_type) && tbl->is_SGI) { in il4965_rate_n_flags_from_tbl()
554 tbl->is_dup = 0; in il4965_rs_get_tbl_info_from_mcs()
602 if (!tbl->ant_type || tbl->ant_type > ANT_ABC) in il4965_rs_toggle_antenna()
1032 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup)) in il4965_rs_set_expected_tpt_table()
1352 tbl->action++; in il4965_rs_move_legacy_other()
1365 tbl->action++; in il4965_rs_move_legacy_other()
1476 tbl->action++; in il4965_rs_move_siso_to_other()
1488 tbl->action++; in il4965_rs_move_siso_to_other()
1601 tbl->action++; in il4965_rs_move_mimo2_to_other()
[all …]
/linux/net/core/
neighbour.c
510 n->tbl = tbl; in neigh_alloc()
807 if (tbl->pconstructor && tbl->pconstructor(n)) { in pneigh_lookup()
1720 p->tbl = tbl; in neigh_parms_alloc()
1785 list_add(&tbl->parms.list, &tbl->parms_list); in neigh_table_init()
1807 if (!tbl->nht || !tbl->phash_buckets) in neigh_table_init()
2343 if (!tbl) in neightbl_set()
3299 struct neigh_table *tbl = state->tbl; in pneigh_get_first() local
3322 struct neigh_table *tbl = state->tbl; in pneigh_get_next() local
3378 state->tbl = tbl; in neigh_seq_start()
3422 struct neigh_table *tbl = state->tbl; in neigh_seq_stop() local
[all …]
/linux/drivers/firmware/efi/
memattr.c
29 tbl = early_memremap(efi_mem_attr_table, sizeof(*tbl)); in efi_memattr_init()
30 if (!tbl) { in efi_memattr_init()
36 if (tbl->version > 2) { in efi_memattr_init()
38 tbl->version); in efi_memattr_init()
42 tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size; in efi_memattr_init()
47 early_memunmap(tbl, sizeof(*tbl)); in efi_memattr_init()
135 if (tbl_size <= sizeof(*tbl)) in efi_memattr_apply_permissions()
148 if (!tbl) { in efi_memattr_apply_permissions()
154 if (tbl->version > 1 && in efi_memattr_apply_permissions()
167 valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size, in efi_memattr_apply_permissions()
[all …]
/linux/net/netfilter/
xt_repldata.h
24 } *tbl; \
28 tbl = kzalloc(term_offset + sizeof(*term), GFP_KERNEL); \
29 if (tbl == NULL) \
32 strscpy_pad(tbl->repl.name, info->name, sizeof(tbl->repl.name)); \
34 tbl->repl.valid_hooks = hook_mask; \
35 tbl->repl.num_entries = nhooks + 1; \
36 tbl->repl.size = nhooks * sizeof(struct type##_standard) + \
41 tbl->repl.hook_entry[hooknum] = bytes; \
42 tbl->repl.underflow[hooknum] = bytes; \
43 tbl->entries[i++] = (struct type##_standard) \
[all …]
/linux/ipc/
ipc_sysctl.c
247 struct ctl_table *tbl; in setup_ipc_sysctls() local
252 if (tbl) { in setup_ipc_sysctls()
257 tbl[i].data = &ns->shm_ctlmax; in setup_ipc_sysctls()
260 tbl[i].data = &ns->shm_ctlall; in setup_ipc_sysctls()
263 tbl[i].data = &ns->shm_ctlmni; in setup_ipc_sysctls()
269 tbl[i].data = &ns->msg_ctlmax; in setup_ipc_sysctls()
278 tbl[i].data = &ns->sem_ctls; in setup_ipc_sysctls()
290 tbl[i].data = NULL; in setup_ipc_sysctls()
297 kfree(tbl); in setup_ipc_sysctls()
307 const struct ctl_table *tbl; in retire_ipc_sysctls() local
[all …]
mq_sysctl.c
119 struct ctl_table *tbl; in setup_mq_sysctls() local
124 if (tbl) { in setup_mq_sysctls()
129 tbl[i].data = &ns->mq_queues_max; in setup_mq_sysctls()
132 tbl[i].data = &ns->mq_msg_max; in setup_mq_sysctls()
135 tbl[i].data = &ns->mq_msgsize_max; in setup_mq_sysctls()
143 tbl[i].data = NULL; in setup_mq_sysctls()
147 "fs/mqueue", tbl, in setup_mq_sysctls()
151 kfree(tbl); in setup_mq_sysctls()
161 const struct ctl_table *tbl; in retire_mq_sysctls() local
163 tbl = ns->mq_sysctls->ctl_table_arg; in retire_mq_sysctls()
[all …]
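
Both ipc_sysctl.c and mq_sysctl.c follow the same per-namespace pattern: duplicate a template ctl_table, repoint each entry's .data at the namespace's own fields, register the copy, and on teardown recover the copy via ctl_table_arg and kfree() it. A hedged sketch of that pattern with a hypothetical one-entry table; register_sysctl()/unregister_sysctl_table() are the real interfaces, though the exact registration variant depends on kernel version.

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_value;

static struct ctl_table demo_template[] = {
	{
		.procname     = "demo_value",
		.data         = &demo_value,	/* repointed per namespace below */
		.maxlen       = sizeof(int),
		.mode         = 0644,
		.proc_handler = proc_dointvec,
	},
};

static struct ctl_table_header *demo_setup_sysctls(int *ns_value)
{
	struct ctl_table *tbl;
	struct ctl_table_header *hdr;

	/* Copy the template so each namespace gets its own .data pointers. */
	tbl = kmemdup(demo_template, sizeof(demo_template), GFP_KERNEL);
	if (!tbl)
		return NULL;

	tbl[0].data = ns_value;

	hdr = register_sysctl("demo", tbl);
	if (!hdr)
		kfree(tbl);
	return hdr;
}

static void demo_retire_sysctls(struct ctl_table_header *hdr)
{
	const struct ctl_table *tbl = hdr->ctl_table_arg;

	unregister_sysctl_table(hdr);
	kfree(tbl);
}
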
/linux/drivers/vfio/
vfio_iommu_spapr_tce.c
211 if (tbl) { in tce_iommu_find_table()
217 *ptbl = tbl; in tce_iommu_find_table()
364 if (!tbl) in tce_iommu_release()
367 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); in tce_iommu_release()
442 if (tbl->it_indirect_levels && tbl->it_userspace) { in tce_iommu_clear()
695 *start_addr = tbl->it_offset << tbl->it_page_shift; in tce_iommu_create_window()
721 BUG_ON(!tbl->it_size); in tce_iommu_remove_window()
741 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); in tce_iommu_remove_window()
968 iommu_flush_tce(tbl); in tce_iommu_ioctl()
1012 iommu_flush_tce(tbl); in tce_iommu_ioctl()
[all …]
/linux/net/mac80211/
mesh_pathtbl.c
82 struct mesh_table *tbl = tblptr; in mesh_path_rht_free() local
84 mesh_path_free_rcu(tbl, mpath); in mesh_path_rht_free()
91 atomic_set(&tbl->entries, 0); in mesh_table_init()
339 struct mesh_table *tbl; in mesh_path_add_gate() local
354 spin_lock(&tbl->gates_lock); in mesh_path_add_gate()
683 struct mesh_table *tbl; in mesh_path_add() local
725 struct mesh_table *tbl; in mpp_path_add() local
802 mesh_gate_del(tbl, mpath); in mesh_path_free_rcu()
806 atomic_dec(&tbl->entries); in mesh_path_free_rcu()
966 struct mesh_table *tbl; in mesh_path_send_to_gates() local
[all …]
/linux/arch/powerpc/kvm/
book3s_64_vio.c
66 iommu_tce_table_put(stit->tbl); in kvm_spapr_tce_iommu_table_free()
161 if (!tbl) in kvm_spapr_tce_attach_iommu_group()
166 if (tbl != stit->tbl) in kvm_spapr_tce_attach_iommu_group()
171 iommu_tce_table_put(tbl); in kvm_spapr_tce_attach_iommu_group()
186 iommu_tce_table_put(tbl); in kvm_spapr_tce_attach_iommu_group()
190 stit->tbl = tbl; in kvm_spapr_tce_attach_iommu_group()
415 u64 *tbl; in kvmppc_tce_put() local
431 tbl = page_to_virt(page); in kvmppc_tce_put()
611 stit->tbl, entry); in kvmppc_h_put_tce()
750 stit->tbl, entry + i); in kvmppc_h_stuff_tce()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/
mod_hdr.c
44 mutex_init(&tbl->lock); in mlx5e_mod_hdr_tbl_init()
45 hash_init(tbl->hlist); in mlx5e_mod_hdr_tbl_init()
50 WARN_ON(!hash_empty(tbl->hlist)); in mlx5e_mod_hdr_tbl_destroy()
51 mutex_destroy(&tbl->lock); in mlx5e_mod_hdr_tbl_destroy()
73 struct mod_hdr_tbl *tbl, in mlx5e_mod_hdr_attach() argument
90 mutex_lock(&tbl->lock); in mlx5e_mod_hdr_attach()
93 mutex_unlock(&tbl->lock); in mlx5e_mod_hdr_attach()
105 mutex_unlock(&tbl->lock); in mlx5e_mod_hdr_attach()
116 mutex_unlock(&tbl->lock); in mlx5e_mod_hdr_attach()
140 struct mod_hdr_tbl *tbl, in mlx5e_mod_hdr_detach() argument
[all …]
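
mlx5e's mod_hdr_tbl is a mutex-protected hashtable: init sets up tbl->lock and tbl->hlist, attach/detach take the mutex around the hash operations, and destroy warns if entries remain. A generic sketch of that pattern using the standard <linux/hashtable.h> helpers; the demo_* types are hypothetical.

#include <linux/bug.h>
#include <linux/hashtable.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_tbl {
	struct mutex lock;		/* protects hlist */
	DECLARE_HASHTABLE(hlist, 8);	/* 256 buckets */
};

struct demo_entry {
	struct hlist_node node;
	u32 key;
};

static void demo_tbl_init(struct demo_tbl *tbl)
{
	mutex_init(&tbl->lock);
	hash_init(tbl->hlist);
}

static void demo_tbl_destroy(struct demo_tbl *tbl)
{
	WARN_ON(!hash_empty(tbl->hlist));
	mutex_destroy(&tbl->lock);
}

static int demo_attach(struct demo_tbl *tbl, u32 key)
{
	struct demo_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;
	e->key = key;

	mutex_lock(&tbl->lock);
	hash_add(tbl->hlist, &e->node, key);
	mutex_unlock(&tbl->lock);
	return 0;
}

static void demo_detach(struct demo_tbl *tbl, struct demo_entry *e)
{
	mutex_lock(&tbl->lock);
	hash_del(&e->node);
	mutex_unlock(&tbl->lock);
	kfree(e);
}
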
