Lines Matching refs:bnode

3336 	struct kvfree_rcu_bulk_data *bnode)  in put_cached_bnode()  argument
3342 	llist_add((struct llist_node *) bnode, &krcp->bkvcache);  in put_cached_bnode()
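These first two matches are the bnode parameter declaration and the llist_add() call that pushes a freed block back onto the per-CPU page cache; like the rest of the listing, they come from the kvfree_rcu batching machinery (kernel/rcu/tree.c in mainline). For orientation, a hedged reconstruction of the whole helper; the rcu_min_cached_objs limit and the nr_bkv_objs accounting are filled in from the mainline source, not from the matched lines themselves:

static bool
put_cached_bnode(struct kfree_rcu_cpu *krcp,
	struct kvfree_rcu_bulk_data *bnode)
{
	/* Refuse the block if the per-CPU cache is already full. */
	if (krcp->nr_bkv_objs >= rcu_min_cached_objs)
		return false;

	/* Push the page-sized block onto the lock-free cache list. */
	llist_add((struct llist_node *) bnode, &krcp->bkvcache);
	WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1);
	return true;
}

Both call sites visible below (kvfree_rcu_bulk() and fill_page_cache_func()) take krcp->lock around this helper, which is why the counter update needs nothing stronger than WRITE_ONCE() for lockless readers.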
3372 	struct kvfree_rcu_bulk_data *bnode, int idx)  in kvfree_rcu_bulk()  argument
3377 	if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {  in kvfree_rcu_bulk()
3378 		debug_rcu_bhead_unqueue(bnode);  in kvfree_rcu_bulk()
3382 				rcu_state.name, bnode->nr_records,  in kvfree_rcu_bulk()
3383 				bnode->records);  in kvfree_rcu_bulk()
3385 			kfree_bulk(bnode->nr_records, bnode->records);  in kvfree_rcu_bulk()
3387 			for (i = 0; i < bnode->nr_records; i++) {  in kvfree_rcu_bulk()
3389 					rcu_state.name, bnode->records[i], 0);  in kvfree_rcu_bulk()
3391 				vfree(bnode->records[i]);  in kvfree_rcu_bulk()
3398 	if (put_cached_bnode(krcp, bnode))  in kvfree_rcu_bulk()
3399 		bnode = NULL;  in kvfree_rcu_bulk()
3402 	if (bnode)  in kvfree_rcu_bulk()
3403 		free_page((unsigned long) bnode);  in kvfree_rcu_bulk()
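kvfree_rcu_bulk() is where a block's pointers are actually released, once the grace period recorded in bnode->gp_snap has elapsed. Channel 0 (idx == 0) holds kmalloc()'ed pointers and can use kfree_bulk(); channel 1 holds vmalloc()'ed pointers, for which no bulk-free API exists, so each record is vfree()'d individually. A sketch assembled around the matched lines; the locking details and the exact tracepoint placement are assumptions taken from the mainline source:

static void
kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
	struct kvfree_rcu_bulk_data *bnode, int idx)
{
	unsigned long flags;
	int i;

	/* The GP snapshotted at enqueue time must have completed. */
	if (!WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&bnode->gp_snap))) {
		debug_rcu_bhead_unqueue(bnode);

		if (idx == 0) { /* kmalloc()/kfree() channel. */
			trace_rcu_invoke_kfree_bulk_callback(
				rcu_state.name, bnode->nr_records,
				bnode->records);

			kfree_bulk(bnode->nr_records, bnode->records);
		} else { /* vmalloc()/vfree() channel: no bulk API. */
			for (i = 0; i < bnode->nr_records; i++) {
				trace_rcu_invoke_kvfree_callback(
					rcu_state.name, bnode->records[i], 0);

				vfree(bnode->records[i]);
			}
		}
	}

	/* Try to recycle the block into the per-CPU cache. */
	raw_spin_lock_irqsave(&krcp->lock, flags);
	if (put_cached_bnode(krcp, bnode))
		bnode = NULL;
	raw_spin_unlock_irqrestore(&krcp->lock, flags);

	/* Cache full: hand the backing page back to the allocator. */
	if (bnode)
		free_page((unsigned long) bnode);
}

Note the double negative on line 3377: WARN_ON_ONCE() fires when the grace period has not elapsed, and in that case the pointers are deliberately leaked rather than freed too early.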
3437 	struct kvfree_rcu_bulk_data *bnode, *n;  in kfree_rcu_work()  local
3463 		list_for_each_entry_safe(bnode, n, &bulk_head[i], list)  in kfree_rcu_work()
3464 			kvfree_rcu_bulk(krcp, bnode, i);  in kfree_rcu_work()
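These three matches form the drain loop of the deferred worker kfree_rcu_work(). A minimal sketch of that loop, assuming (as in mainline) that bulk_head[] is a local array onto which the per-CPU lists were already detached under krcp->lock, so the walk itself runs lockless:

	/* One pass per channel: 0 = kmalloc()'ed, 1 = vmalloc()'ed memory. */
	for (i = 0; i < FREE_N_CHANNELS; i++) {
		list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
			kvfree_rcu_bulk(krcp, bnode, i);
	}

The _safe iterator is required here: kvfree_rcu_bulk() may free the page backing bnode, so the next pointer must be fetched before each block is handed over.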
3532 	struct kvfree_rcu_bulk_data *bnode, *n;  in kvfree_rcu_drain_ready()  local
3541 		list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {  in kvfree_rcu_drain_ready()
3542 			if (!poll_state_synchronize_rcu_full(&bnode->gp_snap))  in kvfree_rcu_drain_ready()
3545 			atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);  in kvfree_rcu_drain_ready()
3546 			list_move(&bnode->list, &bulk_ready[i]);  in kvfree_rcu_drain_ready()
3558 		list_for_each_entry_safe(bnode, n, &bulk_ready[i], list)  in kvfree_rcu_drain_ready()
3559 			kvfree_rcu_bulk(krcp, bnode, i);  in kvfree_rcu_drain_ready()
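kvfree_rcu_drain_ready() splits the work into two phases: under krcp->lock it moves every block whose grace period has already elapsed onto a local bulk_ready[] list, then frees those blocks after dropping the lock. Because new blocks are added at the head of bulk_head[], the tail holds the oldest entries; the reverse walk therefore starts with the best candidates and can stop at the first block that is still waiting. A sketch under those assumptions (the mainline function also drains a plain rcu_head channel, omitted here since it does not touch bnode):

static void kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
{
	struct list_head bulk_ready[FREE_N_CHANNELS];
	struct kvfree_rcu_bulk_data *bnode, *n;
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&krcp->lock, flags);
	for (i = 0; i < FREE_N_CHANNELS; i++) {
		INIT_LIST_HEAD(&bulk_ready[i]);

		/*
		 * Oldest blocks sit at the tail; stop at the first
		 * block whose grace period has not completed yet.
		 */
		list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {
			if (!poll_state_synchronize_rcu_full(&bnode->gp_snap))
				break;

			atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);
			list_move(&bnode->list, &bulk_ready[i]);
		}
	}
	raw_spin_unlock_irqrestore(&krcp->lock, flags);

	/* Free the detached blocks without holding the lock. */
	for (i = 0; i < FREE_N_CHANNELS; i++) {
		list_for_each_entry_safe(bnode, n, &bulk_ready[i], list)
			kvfree_rcu_bulk(krcp, bnode, i);
	}
}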
3658 	struct kvfree_rcu_bulk_data *bnode;  in fill_page_cache_func()  local
3671 		bnode = (struct kvfree_rcu_bulk_data *)  in fill_page_cache_func()
3674 		if (!bnode)  in fill_page_cache_func()
3678 		pushed = put_cached_bnode(krcp, bnode);  in fill_page_cache_func()
3682 			free_page((unsigned long) bnode);  in fill_page_cache_func()
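fill_page_cache_func() is the workqueue side of the cache: it allocates pages ahead of time so the enqueue fast path rarely has to. A sketch of the loop these matches come from; i, nr_pages, flags, and pushed are locals of the worker, and the GFP flag combination and loop bounds are assumptions carried over from the mainline source:

	/* Top up the per-CPU cache until it holds nr_pages blocks. */
	for (i = READ_ONCE(krcp->nr_bkv_objs); i < nr_pages; i++) {
		bnode = (struct kvfree_rcu_bulk_data *)
			__get_free_page(GFP_KERNEL | __GFP_NORETRY |
					__GFP_NOMEMALLOC | __GFP_NOWARN);

		if (!bnode)
			break;

		raw_spin_lock_irqsave(&krcp->lock, flags);
		pushed = put_cached_bnode(krcp, bnode);
		raw_spin_unlock_irqrestore(&krcp->lock, flags);

		/* Cache already full: release the page and stop. */
		if (!pushed) {
			free_page((unsigned long) bnode);
			break;
		}
	}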
3722 	struct kvfree_rcu_bulk_data *bnode;  in add_ptr_to_bulk_krc_lock()  local
3730 	bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx],  in add_ptr_to_bulk_krc_lock()
3734 	if (!bnode || bnode->nr_records == KVFREE_BULK_MAX_ENTR) {  in add_ptr_to_bulk_krc_lock()
3735 		bnode = get_cached_bnode(*krcp);  in add_ptr_to_bulk_krc_lock()
3736 		if (!bnode && can_alloc) {  in add_ptr_to_bulk_krc_lock()
3750 			bnode = (struct kvfree_rcu_bulk_data *)  in add_ptr_to_bulk_krc_lock()
3755 		if (!bnode)  in add_ptr_to_bulk_krc_lock()
3759 		bnode->nr_records = 0;  in add_ptr_to_bulk_krc_lock()
3760 		list_add(&bnode->list, &(*krcp)->bulk_head[idx]);  in add_ptr_to_bulk_krc_lock()
3764 	bnode->nr_records++;  in add_ptr_to_bulk_krc_lock()
3765 	bnode->records[bnode->nr_records - 1] = ptr;  in add_ptr_to_bulk_krc_lock()
3766 	get_state_synchronize_rcu_full(&bnode->gp_snap);  in add_ptr_to_bulk_krc_lock()
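add_ptr_to_bulk_krc_lock() is the enqueue fast path that ties the other pieces together: pick the channel from the pointer type, reuse the current head block if it still has room, otherwise pull a block from the cache or, when sleeping is allowed, drop the per-CPU lock and allocate a fresh page. A reconstruction around the matched lines; krc_this_cpu_lock()/krc_this_cpu_unlock(), the GFP flags, and the bulk_count accounting are filled in from the mainline source:

static inline bool
add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
	unsigned long *flags, void *ptr, bool can_alloc)
{
	struct kvfree_rcu_bulk_data *bnode;
	int idx;

	*krcp = krc_this_cpu_lock(flags);
	if (unlikely(!(*krcp)->initialized))
		return false;

	/* Channel 0 is kmalloc()'ed memory, channel 1 vmalloc()'ed. */
	idx = !!is_vmalloc_addr(ptr);
	bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx],
		struct kvfree_rcu_bulk_data, list);

	/* No block yet, or the head block is full: get a new one. */
	if (!bnode || bnode->nr_records == KVFREE_BULK_MAX_ENTR) {
		bnode = get_cached_bnode(*krcp);
		if (!bnode && can_alloc) {
			/* Drop the lock: the allocation may sleep. */
			krc_this_cpu_unlock(*krcp, *flags);
			bnode = (struct kvfree_rcu_bulk_data *)
				__get_free_page(GFP_KERNEL | __GFP_NORETRY |
						__GFP_NOMEMALLOC | __GFP_NOWARN);
			raw_spin_lock_irqsave(&(*krcp)->lock, *flags);
		}

		if (!bnode)
			return false;

		/* Attach the empty block at the head of the channel. */
		bnode->nr_records = 0;
		list_add(&bnode->list, &(*krcp)->bulk_head[idx]);
	}

	/* Record the pointer and snapshot the current GP state. */
	bnode->nr_records++;
	bnode->records[bnode->nr_records - 1] = ptr;
	get_state_synchronize_rcu_full(&bnode->gp_snap);
	atomic_inc(&(*krcp)->bulk_count[idx]);

	return true;
}

Refreshing bnode->gp_snap on every insertion keeps the snapshot conservative: a block only becomes ready once a full grace period has elapsed after its newest pointer was queued.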