/linux/drivers/net/wireless/realtek/rtlwifi/

ps.h
     14  void rtl_lps_enter(struct ieee80211_hw *hw, bool may_block);
     15  void rtl_lps_leave(struct ieee80211_hw *hw, bool may_block);

ps.c
    656  void rtl_lps_enter(struct ieee80211_hw *hw, bool may_block)
    660      if (may_block)                                   (in rtl_lps_enter)
    667  void rtl_lps_leave(struct ieee80211_hw *hw, bool may_block)
    671      if (may_block)                                   (in rtl_lps_leave)
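
Judging from the paired `if (may_block)` checks in ps.c, the flag appears to choose between sleeping on a lock and a best-effort trylock. A minimal sketch of that pattern, assuming a hypothetical `lps_mutex` and `rtl_lps_enter_core()` (only `rtl_lps_enter` itself is taken from the listing):

    #include <linux/mutex.h>
    #include "wifi.h"	/* driver-local header, assumed for rtl_priv() */

    /* Sketch only: lps_mutex and rtl_lps_enter_core() are assumed names. */
    void rtl_lps_enter(struct ieee80211_hw *hw, bool may_block)
    {
    	struct rtl_priv *rtlpriv = rtl_priv(hw);

    	if (may_block)
    		/* Caller can sleep: wait for the lock. */
    		mutex_lock(&rtlpriv->locks.lps_mutex);
    	else if (!mutex_trylock(&rtlpriv->locks.lps_mutex))
    		/* Atomic caller: skip the transition rather than block. */
    		return;

    	rtl_lps_enter_core(hw);
    	mutex_unlock(&rtlpriv->locks.lps_mutex);
    }
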
/linux/include/linux/

dm-region-hash.h
     67  int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block);
     69      ... enum dm_rh_region_states state, int may_block);

nfs_fs.h
    563      ... u32 *mask, bool may_block);

kvm_host.h
    268  bool may_block;                                      (struct member)
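
The kvm_host.h hit is the `may_block` member of KVM's gfn-range descriptor, which the per-architecture handlers below consume. A trimmed sketch of the surrounding struct; only `may_block` is taken from the listing, the other fields shown are the usual range bounds:

    #include <linux/kvm_types.h>	/* gfn_t */

    /* Trimmed sketch of the descriptor around line 268. */
    struct kvm_gfn_range {
    	struct kvm_memory_slot *slot;
    	gfn_t start;
    	gfn_t end;
    	bool may_block;		/* handler may sleep / cond_resched */
    };
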
/linux/arch/riscv/kvm/

mmu.c
    270  ... gpa_t size, bool may_block)                      (in gstage_unmap_range)
    300      if (may_block && addr < end)                     (in gstage_unmap_range)
    549      range->may_block);                               (in kvm_unmap_gfn_range)
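
`gstage_unmap_range()` shows the typical consumer: a long page-table walk that voluntarily reschedules between iterations, but only when the caller said blocking is allowed. A sketch of that loop shape, assuming a hypothetical `unmap_one()` helper:

    #include <linux/kvm_host.h>

    /* Assumed helper: unmaps one mapping and returns the next address. */
    static gpa_t unmap_one(struct kvm *kvm, gpa_t addr, gpa_t end);

    static void gstage_unmap_range(struct kvm *kvm, gpa_t start, gpa_t size,
    			       bool may_block)
    {
    	gpa_t addr = start;
    	gpa_t end = start + size;

    	while (addr < end) {
    		addr = unmap_one(kvm, addr, end);

    		/*
    		 * Long walks must not hog the CPU or the mmu_lock, but
    		 * yielding is only legal when the caller tolerates it;
    		 * some MMU-notifier callers pass may_block = false.
    		 */
    		if (may_block && addr < end)
    			cond_resched_lock(&kvm->mmu_lock);
    	}
    }
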
/linux/arch/arm64/kvm/

mmu.c
    320  ... bool may_block)                                  (in __unmap_stage2_range)
    328      ... may_block));                                 (in __unmap_stage2_range)
    332  ... u64 size, bool may_block)                        (in kvm_stage2_unmap_range)
    334      __unmap_stage2_range(mmu, start, size, may_block);
   1914      range->may_block);                               (in kvm_unmap_gfn_range)
   1916      kvm_nested_s2_unmap(kvm, range->may_block);      (in kvm_unmap_gfn_range)

nested.c
    758  void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
    768      kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block);
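
On arm64 the flag is threaded one level deeper: `kvm_unmap_gfn_range()` forwards `range->may_block` both to the regular stage-2 unmap and to `kvm_nested_s2_unmap()`, which tears down every shadow stage-2 MMU with the same policy. A sketch of that fan-out, assuming a hypothetical `for_each_nested_mmu()` iterator (only the two real function names come from the listing):

    /* Sketch; for_each_nested_mmu() is an assumed iterator, not the real API. */
    void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
    {
    	struct kvm_s2_mmu *mmu;

    	for_each_nested_mmu(kvm, mmu)
    		/* Each shadow MMU inherits the caller's blocking policy. */
    		kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block);
    }
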
/linux/arch/arm64/include/asm/

kvm_nested.h
    129  extern void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block);

kvm_mmu.h
    170  ... u64 size, bool may_block);
/linux/drivers/md/

dm-region-hash.c
    341  int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
    357      r = rh->log->type->in_sync(rh->log, region, may_block);

dm-raid1.c
    561  ... int may_block)                                   (in region_in_sync)
    563      int state = dm_rh_get_state(ms->rh, region, may_block);
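
In device-mapper the flag is an `int` rather than a `bool`, and when a region is not in the hash it is forwarded to the dirty log's `in_sync` hook (line 357), which may need to read log state from disk and therefore must know whether sleeping is allowed. dm-raid1's consumer then collapses the tri-state answer. A condensed sketch of the consumer, assuming the usual `DM_RH_*` state values:

    #include <linux/dm-region-hash.h>

    /* Condensed sketch of the dm-raid1 side (lines 561-563). */
    static int region_in_sync(struct mirror_set *ms, region_t region,
    			  int may_block)
    {
    	/* May consult the on-disk dirty log only if may_block != 0. */
    	int state = dm_rh_get_state(ms->rh, region, may_block);

    	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
    }
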
/linux/fs/nfs/

dir.c
   3045  nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, u32 *mask, bool may_block)
   3067      if (!may_block)                                  (in nfs_access_get_cached_locked)
   3124  ... u32 *mask, bool may_block)                       (in nfs_access_get_cached)
   3131      ... may_block);                                  (in nfs_access_get_cached)
   3251      bool may_block = (mask & MAY_NOT_BLOCK) == 0;    (in nfs_do_access)
   3257      status = nfs_access_get_cached(inode, cred, &cache.mask, may_block);
   3262      if (!may_block)                                  (in nfs_do_access)
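
The NFS hits show where `may_block` usually originates on the VFS side: permission checks in RCU walk mode carry `MAY_NOT_BLOCK`, and `nfs_do_access()` inverts that into the flag it hands to the access cache. A condensed sketch of the derivation; the two `nfs_access_*` helpers at the end are illustrative stand-ins, and `-ECHILD` follows the usual "retry in ref-walk mode" convention:

    #include <linux/fs.h>	/* MAY_NOT_BLOCK */

    static int nfs_do_access(struct inode *inode, const struct cred *cred,
    			 int mask)
    {
    	struct nfs_access_entry cache;
    	/* Line 3251: RCU-walk callers set MAY_NOT_BLOCK in the mask. */
    	bool may_block = (mask & MAY_NOT_BLOCK) == 0;
    	int status;

    	status = nfs_access_get_cached(inode, cred, &cache.mask, may_block);
    	if (status == 0)
    		return nfs_access_check(cache.mask, mask);	/* assumed helper */

    	/* Cache miss: an RCU-walk caller must not issue a blocking RPC. */
    	if (!may_block)
    		return -ECHILD;

    	return nfs_access_fetch(inode, cred, mask);		/* assumed helper */
    }
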
/linux/virt/kvm/

kvm_main.c
    557  bool may_block;                                      (struct member)
    634      gfn_range.may_block = range->may_block;          (in __kvm_handle_hva_range)
    681      .may_block = false,                              (in kvm_handle_hva_range)
    699      .may_block = false,                              (in kvm_handle_hva_range_no_flush)
    763      .may_block = mmu_notifier_range_blockable(range),    (in kvm_mmu_notifier_invalidate_range_start)
    839      .may_block = mmu_notifier_range_blockable(range),    (in kvm_mmu_notifier_invalidate_range_end)
   2455      gfn_range.may_block = range->may_block;          (in kvm_handle_gfn_range)
   2516      .may_block = true,                               (in kvm_vm_set_mem_attributes)
   2524      .may_block = true,                               (in kvm_vm_set_mem_attributes)

guest_memfd.c
    120      .may_block = true,                               (in kvm_gmem_invalidate_begin)
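
kvm_main.c is where the flag's value is actually decided: invalidations driven by the MMU notifier ask `mmu_notifier_range_blockable()` (lines 763, 839), the age/clear paths hard-code `false`, and KVM-initiated invalidations (memory attributes, guest_memfd) hard-code `true`. A condensed sketch of the notifier side; the handler wiring and return handling are simplified, and the field set mirrors only what the listing shows:

    /* Condensed sketch of how the notifier populates the flag (line 763). */
    static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
    				const struct mmu_notifier_range *range)
    {
    	struct kvm *kvm = mmu_notifier_to_kvm(mn);
    	const struct kvm_mmu_notifier_range hva_range = {
    		.start		= range->start,
    		.end		= range->end,
    		.handler	= kvm_unmap_gfn_range,	/* arch hook */
    		/* Blocking is legal only if the notifier says so. */
    		.may_block	= mmu_notifier_range_blockable(range),
    	};

    	__kvm_handle_hva_range(kvm, &hva_range);	/* condensed */
    	return 0;
    }
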
/linux/fs/fuse/

fuse_i.h
    309  bool may_block:1;                                    (struct member, bitfield)

virtio_fs.c
    836      if (req->args->may_block) {                      (in virtio_fs_requests_done_work)

file.c
    793      ia->ap.args.may_block = io->should_dirty;        (in fuse_async_req_send)
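
In FUSE the bit travels with the request arguments: file.c sets it for async I/O whose completion will have to dirty user pages, and virtio_fs's completion work checks it before ending a request where sleeping is unwelcome. A sketch of that check; both helpers below are illustrative names, not the driver's real ones:

    /* Sketch only: both helpers are assumed, not virtio_fs's real API. */
    static void virtio_fs_complete_one(struct fuse_req *req)
    {
    	if (req->args->may_block) {
    		/*
    		 * Ending this request may sleep (file.c sets the bit from
    		 * io->should_dirty), so defer to a context that may block.
    		 */
    		defer_to_blocking_worker(req);
    	} else {
    		complete_request_inline(req);
    	}
    }
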
/linux/arch/x86/kvm/mmu/

tdp_mmu.c
   1193      range->may_block, flush);                        (in kvm_tdp_mmu_unmap_gfn_range)

mmu.c
   1573      range->may_block, flush);                        (in kvm_unmap_gfn_range)
   7102      .may_block = true,                               (in kvm_mmu_zap_memslot)
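
The x86 hits close the loop: `kvm_unmap_gfn_range()` passes `range->may_block` into the TDP-MMU zap, while KVM-initiated zaps such as `kvm_mmu_zap_memslot()` build their own range with `.may_block = true`, since they run in a sleepable context. A condensed sketch of the latter; the TLB-flush follow-up is an assumption about what surrounds line 7102:

    /* Condensed sketch of a KVM-initiated zap (.may_block per line 7102). */
    static void kvm_mmu_zap_memslot(struct kvm *kvm,
    				struct kvm_memory_slot *slot)
    {
    	struct kvm_gfn_range range = {
    		.slot		= slot,
    		.start		= slot->base_gfn,
    		.end		= slot->base_gfn + slot->npages,
    		/* Internal caller, no atomic context: blocking is fine. */
    		.may_block	= true,
    	};

    	if (kvm_unmap_gfn_range(kvm, &range))
    		kvm_flush_remote_tlbs_memslot(kvm, slot);
    }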