/linux/include/linux/

rwbase_rt.h
    12: atomic_t readers;    (member)
    18: .readers = ATOMIC_INIT(READER_BIAS), \
    25: atomic_set(&(rwbase)->readers, READER_BIAS); \
    31: return atomic_read(&rwb->readers) != READER_BIAS;    (in rw_base_is_locked())
    36: return atomic_read(&rwb->readers) == WRITER_BIAS;    (in rw_base_is_write_locked())
    41: return atomic_read(&rwb->readers) > 0;    (in rw_base_is_contended())

rwlock_types.h
    59: atomic_t readers;    (member)

/linux/kernel/locking/

rwbase_rt.c
    61: for (r = atomic_read(&rwb->readers); r < 0;) {    (in rwbase_read_trylock())
    62: if (likely(atomic_try_cmpxchg_acquire(&rwb->readers, &r, r + 1)))    (in rwbase_read_trylock())
   123: atomic_inc(&rwb->readers);    (in __rwbase_read_lock())
   177: if (unlikely(atomic_dec_and_test(&rwb->readers)))    (in rwbase_read_unlock())
   190: (void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);    (in __rwbase_write_unlock())
   223: if (!atomic_read_acquire(&rwb->readers)) {    (in __rwbase_write_trylock())
   224: atomic_set(&rwb->readers, WRITER_BIAS);    (in __rwbase_write_trylock())
   242: atomic_sub(READER_BIAS, &rwb->readers);    (in rwbase_write_lock())
   288: atomic_sub(READER_BIAS, &rwb->readers);    (in rwbase_write_trylock())

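All of the rwbase_rt hits above operate on a single atomic_t that encodes the whole reader/writer state via the READER_BIAS/WRITER_BIAS constants (defined in rwbase_rt.h as 1U << 31 and 1U << 30). A rough user-space sketch of that encoding, not the kernel implementation (the rtmutex, slow paths and wakeups are omitted):

/* Illustration only: the biased-counter encoding used by rwbase_rt,
 * redone with C11 atomics. */
#include <stdatomic.h>
#include <stdbool.h>

#define READER_BIAS  (1U << 31)	/* unlocked: readers == (int)READER_BIAS */
#define WRITER_BIAS  (1U << 30)	/* write-locked: readers == (int)WRITER_BIAS */

static atomic_int readers = (int)READER_BIAS;

/* Mirrors rw_base_is_locked(): any deviation from the bias means "held". */
static bool is_locked(void)
{
	return atomic_load(&readers) != (int)READER_BIAS;
}

/* Mirrors rw_base_is_write_locked(). */
static bool is_write_locked(void)
{
	return atomic_load(&readers) == (int)WRITER_BIAS;
}

/* Mirrors rw_base_is_contended(): a writer has stripped READER_BIAS, so a
 * positive value is the number of readers it is still waiting to drain. */
static bool is_contended(void)
{
	return atomic_load(&readers) > 0;
}

/* Reader fast path, as in rwbase_read_trylock(): succeed only while the
 * value is still negative, i.e. the reader bias is present and no writer
 * is draining readers. */
static bool read_trylock(void)
{
	int r = atomic_load(&readers);

	while (r < 0) {
		if (atomic_compare_exchange_weak(&readers, &r, r + 1))
			return true;
	}
	return false;
}
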
/linux/fs/bcachefs/

six.c
   163: this_cpu_sub(*lock->readers, !ret);    (in __do_six_trylock())
   576: lock->readers) {    (in do_six_unlock_type())
   578: this_cpu_dec(*lock->readers);    (in do_six_unlock_type())
   664: if (!lock->readers) {    (in six_lock_tryupgrade())
   672: if (lock->readers)    (in six_lock_tryupgrade())
   673: this_cpu_dec(*lock->readers);    (in six_lock_tryupgrade())
   729: if (lock->readers) {    (in six_lock_increment())
   730: this_cpu_inc(*lock->readers);    (in six_lock_increment())
   818: if (lock->readers) {    (in six_lock_readers_add())
   840: free_percpu(lock->readers);    (in six_lock_exit())
   [all …]

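When a six lock is set up with per-CPU reader counts, lock->readers is a percpu counter rather than part of the main lock word, which is why the matches above are all this_cpu_inc()/this_cpu_dec() plus a free_percpu() at exit. A generic sketch of that pattern; my_lock and its helpers are invented names, not the bcachefs API:

/* Per-CPU reader counting: taking or dropping a read lock touches only the
 * local CPU's counter; only the (rare) writer-side check pays to sum them. */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct my_lock {
	unsigned int __percpu *readers;		/* NULL if per-CPU mode unused */
};

static int my_lock_init_pcpu(struct my_lock *lock)
{
	lock->readers = alloc_percpu(unsigned int);
	return lock->readers ? 0 : -ENOMEM;
}

static void my_read_lock(struct my_lock *lock)
{
	if (lock->readers)
		this_cpu_inc(*lock->readers);	/* no cross-CPU cacheline traffic */
}

static void my_read_unlock(struct my_lock *lock)
{
	if (lock->readers)
		this_cpu_dec(*lock->readers);
}

/* The expensive part: summing every CPU's contribution. */
static long my_lock_nr_readers(struct my_lock *lock)
{
	long nr = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		nr += *per_cpu_ptr(lock->readers, cpu);
	return nr;
}

static void my_lock_exit(struct my_lock *lock)
{
	free_percpu(lock->readers);
	lock->readers = NULL;
}
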
/linux/fs/btrfs/

locking.c
   332: atomic_set(&lock->readers, 0);    (in btrfs_drew_lock_init())
   341: if (atomic_read(&lock->readers))    (in btrfs_drew_try_write_lock())
   348: if (atomic_read(&lock->readers)) {    (in btrfs_drew_try_write_lock())
   361: wait_event(lock->pending_writers, !atomic_read(&lock->readers));    (in btrfs_drew_write_lock())
   377: atomic_inc(&lock->readers);    (in btrfs_drew_read_lock())
   396: if (atomic_dec_and_test(&lock->readers))    (in btrfs_drew_read_unlock())

subpage.c
   146: atomic_set(&ret->readers, 0);    (in btrfs_alloc_subpage())
   242: atomic_add(nbits, &subpage->readers);    (in btrfs_subpage_start_reader())
   263: ASSERT(atomic_read(&subpage->readers) >= nbits);    (in btrfs_subpage_end_reader())
   266: last = atomic_sub_and_test(nbits, &subpage->readers);    (in btrfs_subpage_end_reader())
   310: ASSERT(atomic_read(&subpage->readers) == 0);    (in btrfs_subpage_start_writer())

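The locking.c hits are the btrfs "drew" lock, where the readers counter and the pending_writers wait queue cooperate: a writer may only take the lock while the reader count is zero, and the last reader out wakes any waiting writer. A simplified reconstruction of just that interplay; field names follow the matches above, but the real lock also has the mirror-image writers/pending_readers side and memory barriers between the two counters:

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/wait.h>

struct drew_lock_sketch {
	atomic_t readers;
	wait_queue_head_t pending_writers;
};

static void drew_lock_init(struct drew_lock_sketch *lock)
{
	atomic_set(&lock->readers, 0);
	init_waitqueue_head(&lock->pending_writers);
}

static void drew_read_lock(struct drew_lock_sketch *lock)
{
	atomic_inc(&lock->readers);
	/* (the real lock also waits here until no writer is active) */
}

static void drew_read_unlock(struct drew_lock_sketch *lock)
{
	/* atomic_dec_and_test() implies a full barrier, so a writer woken
	 * below is guaranteed to observe the decremented count. */
	if (atomic_dec_and_test(&lock->readers))
		wake_up(&lock->pending_writers);
}

static bool drew_try_write_lock(struct drew_lock_sketch *lock)
{
	/* Writers never wait readers out opportunistically; they only
	 * succeed when no reader is inside at all. */
	return atomic_read(&lock->readers) == 0;
}

static void drew_write_lock(struct drew_lock_sketch *lock)
{
	while (!drew_try_write_lock(lock))
		wait_event(lock->pending_writers, !atomic_read(&lock->readers));
}
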
/linux/tools/testing/selftests/kvm/lib/

userfaultfd_util.c
   125: uffd_desc->readers = calloc(sizeof(pthread_t), num_readers);    (in uffd_setup_demand_paging())
   126: TEST_ASSERT(uffd_desc->readers, "Failed to alloc reader threads");    (in uffd_setup_demand_paging())
   170: pthread_create(&uffd_desc->readers[i], NULL, uffd_handler_thread_fn,    (in uffd_setup_demand_paging())
   190: TEST_ASSERT(!pthread_join(uffd->readers[i], NULL),    (in uffd_stop_demand_paging())
   201: free(uffd->readers);    (in uffd_stop_demand_paging())

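Stripped of the userfaultfd plumbing, the selftest hits above are the usual "array of reader threads" lifecycle: allocate the pthread_t array, create one handler thread per reader, then join and free on teardown. A minimal standalone version; reader_fn is a placeholder for the real uffd handler:

#include <pthread.h>
#include <stdlib.h>

static void *reader_fn(void *arg)
{
	/* ... poll the fd and service events until told to stop ... */
	return NULL;
}

int main(void)
{
	int num_readers = 4;
	pthread_t *readers;
	int i;

	readers = calloc(num_readers, sizeof(*readers));
	if (!readers)
		return 1;

	for (i = 0; i < num_readers; i++)
		if (pthread_create(&readers[i], NULL, reader_fn, NULL))
			return 1;

	for (i = 0; i < num_readers; i++)
		pthread_join(readers[i], NULL);

	free(readers);
	return 0;
}
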
/linux/drivers/misc/cardreader/

Kconfig
     9: Alcor Micro card readers support access to many types of memory cards,
    21: Realtek card readers support access to many types of memory cards,
    30: Select this option to get support for Realtek USB 2.0 card readers

/linux/Documentation/RCU/

rcu.rst
    10: must be long enough that any readers accessing the item being deleted have
    21: The advantage of RCU's two-part approach is that RCU readers need
    26: in read-mostly situations. The fact that RCU readers need not
    30: if the RCU readers give no indication when they are done?
    32: Just as with spinlocks, RCU readers are not permitted to
    42: same effect, but require that the readers manipulate CPU-local

checklist.rst
   100: locks (that are acquired by both readers and writers)
   101: that guard per-element state. Fields that the readers
   107: c. Make updates appear atomic to readers. For example,
   111: appear to be atomic to RCU readers, nor will sequences
   162: is common to readers and updaters. However, lockdep
   247: call_rcu_tasks(), then the readers must refrain from
   253: corresponding readers must use rcu_read_lock_trace()
   257: then the corresponding readers must use anything that
   439: when you need lightweight readers.
   459: that readers can follow that could be affected by the
   [all …]

whatisRCU.rst
    81: readers is the semantics of modern CPUs guarantee that readers will see
   101: readers cannot gain a reference to it.
   103: b. Wait for all previous readers to complete their RCU read-side
   111: The ability to wait until all readers are done allows RCU readers to
   121: readers. Concurrent RCU readers can then continue accessing the old
   228: readers are done, its implementation is key to RCU. For RCU
   509: This primitive protects concurrent readers from the updater,
  1155: a. Will readers need to block? If so, you need SRCU.
  1178: If so, you should disable softirq across your readers, for
  1235: allows latency to "bleed" from readers to other
   [all …]

lockdep.rst
    43: invoked by both RCU readers and updaters.
    47: is invoked by both RCU-bh readers and updaters.
    51: is invoked by both RCU-sched readers and updaters.
    55: is invoked by both SRCU readers and updaters.

listRCU.rst
    63: ``tasklist_lock``. To prevent readers from noticing changes in the list
    67: any readers traversing the list will see valid ``p->tasks.next`` pointers
    71: all existing readers finish, which guarantees that the ``task_struct``
    73: of all RCU readers that might possibly have a reference to that object.
   219: need for writers to exclude readers.
   226: readers to fail spectacularly.
   228: So, when readers can tolerate stale data and when entries are either added or

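The RCU excerpts above all circle the same two-part discipline: unlink the item so new readers cannot find it, wait until every pre-existing reader is done, and only then free it. The list-based form that listRCU.rst describes looks roughly like this; struct foo, foo_list and foo_lock are placeholders:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct foo {
	struct list_head list;
	int key;
	int data;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);	/* serializes updaters only */

/* Reader: no locks taken, just a read-side critical section. */
static int foo_lookup(int key)
{
	struct foo *p;
	int ret = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(p, &foo_list, list) {
		if (p->key == key) {
			ret = p->data;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/* Updater: unlink, wait for pre-existing readers, then free. */
static void foo_del(struct foo *p)
{
	spin_lock(&foo_lock);
	list_del_rcu(&p->list);
	spin_unlock(&foo_lock);

	synchronize_rcu();	/* all readers that might still see 'p' are done */
	kfree(p);
}
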
/linux/drivers/misc/ibmasm/

event.c
    30: list_for_each_entry(reader, &sp->event_buffer->readers, node)    (in wake_up_event_readers())
   123: list_add(&reader->node, &sp->event_buffer->readers);    (in ibmasm_event_reader_register())
   153: INIT_LIST_HEAD(&buffer->readers);    (in ibmasm_event_buffer_init())

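The ibmasm event buffer (and hid-roccat further down) keep their registered readers on a plain list_head and walk it to wake everyone when an event arrives. The core of that pattern, with the locking around the list omitted for brevity; the names are placeholders:

#include <linux/list.h>
#include <linux/wait.h>

struct event_reader {
	wait_queue_head_t wait;
	struct list_head node;
};

struct event_buffer {
	struct list_head readers;	/* all registered readers */
};

static void event_buffer_init(struct event_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->readers);
}

static void event_reader_register(struct event_buffer *buffer,
				  struct event_reader *reader)
{
	init_waitqueue_head(&reader->wait);
	list_add(&reader->node, &buffer->readers);
}

/* Called when a new event is queued: wake every sleeping reader. */
static void wake_up_event_readers(struct event_buffer *buffer)
{
	struct event_reader *reader;

	list_for_each_entry(reader, &buffer->readers, node)
		wake_up_interruptible(&reader->wait);
}
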
/linux/Documentation/locking/

lockdep-design.rst
   410: r: stands for non-recursive readers.
   411: R: stands for recursive readers.
   424: The difference between recursive readers and non-recursive readers is because:
   426: readers could get blocked by a write lock *waiter*. Considering the follow
   442: Block conditions on readers/writers of the same lock instance:
   448: 3. Writers block both recursive readers and non-recursive readers.
   449: 4. And readers (recursive or not) don't block other recursive readers but
   450: may block non-recursive readers (because of the potential co-existing
   465: (W: writers, r: non-recursive readers, R: recursive readers)
   527: recursive readers and non-recursive readers for L1 (as they block the same types) and
   [all …]

seqlock.rst
     9: lockless readers (read-only retry loops), and no writer starvation. They
    23: is odd and indicates to the readers that an update is in progress. At
    25: even again which lets readers make progress.
   153: from interruption by readers. This is typically the case when the read
   195: 1. Normal Sequence readers which never block a writer but they must
   206: 2. Locking readers which will wait if a writer or another locking reader
   218: according to a passed marker. This is used to avoid lockless readers

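The seqlock.rst lines describe the sequence-count protocol: the writer makes the count odd while it updates and even again when done, and lockless readers retry whenever they observe a change. In terms of the seqlock_t API this is roughly the following; struct foo and foo_seqlock are invented:

#include <linux/seqlock.h>

struct foo {
	int a, b;
};

static struct foo foo_state;
static DEFINE_SEQLOCK(foo_seqlock);

/* Writer: sequence becomes odd on entry, even again on exit. */
static void foo_update(int a, int b)
{
	write_seqlock(&foo_seqlock);
	foo_state.a = a;
	foo_state.b = b;
	write_sequnlock(&foo_seqlock);
}

/* Lockless reader: retry if a writer was (or became) active meanwhile. */
static struct foo foo_read(void)
{
	struct foo snapshot;
	unsigned int seq;

	do {
		seq = read_seqbegin(&foo_seqlock);
		snapshot = foo_state;
	} while (read_seqretry(&foo_seqlock, seq));

	return snapshot;
}
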
locktypes.rst
    95: readers.
   135: rw_semaphore is a multiple readers and single writer lock mechanism.
   141: exist special-purpose interfaces that allow non-owner release for readers.
   151: readers, a preempted low-priority reader will continue holding its lock,
   152: thus starving even high-priority writers. In contrast, because readers
   155: writer from starving readers.
   299: rwlock_t is a multiple readers and single writer lock mechanism.
   314: readers, a preempted low-priority reader will continue holding its lock,
   315: thus starving even high-priority writers. In contrast, because readers
   318: preventing that writer from starving readers.

/linux/fs/

pipe.c
   424: !READ_ONCE(pipe->readers);    (in pipe_writable())
   457: if (!pipe->readers) {    (in pipe_write())
   498: if (!pipe->readers) {    (in pipe_write())
   700: if (!pipe->readers)    (in pipe_poll())
   729: pipe->readers--;    (in pipe_release())
   734: if (!pipe->readers != !pipe->writers) {    (in pipe_release())
   896: pipe->readers = pipe->writers = 1;    (in get_pipe_inode())
  1153: if (pipe->readers++ == 0)    (in fifo_open())
  1182: if (!is_pipe && !pipe->readers) {    (in fifo_open())
  1196: pipe->readers++;    (in fifo_open())
   [all …]

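The pipe->readers counter above tracks how many readers currently have the pipe open; its most visible user-space consequence is that writing after the last reader has gone fails with EPIPE (or raises SIGPIPE if that signal is not ignored). A small demonstration:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char c = 'x';

	signal(SIGPIPE, SIG_IGN);	/* get -EPIPE instead of being killed */

	if (pipe(fds))
		return 1;
	close(fds[0]);			/* drop the only reader */

	if (write(fds[1], &c, 1) < 0)
		printf("write: %s\n", strerror(errno));	/* "Broken pipe" */

	close(fds[1]);
	return 0;
}
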
/linux/drivers/soc/aspeed/

aspeed-p2a-ctrl.c
    65: u32 readers;    (member)
   192: ctrl->readers += 1;    (in aspeed_p2a_ioctl())
   275: priv->parent->readers -= priv->read;    (in aspeed_p2a_release())
   299: if (!open_regions && priv->parent->readers == 0)    (in aspeed_p2a_release())

/linux/drivers/hid/

hid-roccat.c
    47: struct list_head readers;    (member)
   191: list_add_tail(&reader->node, &device->readers);    (in roccat_open())
   270: list_for_each_entry(reader, &device->readers, node) {    (in roccat_report_event())
   339: INIT_LIST_HEAD(&device->readers);    (in roccat_connect())

/linux/tools/testing/selftests/kvm/include/

userfaultfd_util.h
    31: pthread_t *readers;    (member)

/linux/drivers/iio/

TODO
    14: to state struct and using property handlers and readers.

/linux/drivers/md/dm-vdo/indexer/

volume-index.c
   806: struct buffered_reader **readers,    (in start_restoring_volume_sub_index(), argument)
   820: result = uds_read_from_buffered_reader(readers[i], buffer,    (in start_restoring_volume_sub_index())
   873: result = uds_read_from_buffered_reader(readers[i], decoded,    (in start_restoring_volume_sub_index())
   891: result = uds_start_restoring_delta_index(&sub_index->delta_index, readers,    (in start_restoring_volume_sub_index())
   983: struct buffered_reader **readers, unsigned int reader_count)    (in uds_load_volume_index(), argument)
   988: result = start_restoring_volume_index(volume_index, readers, reader_count);    (in uds_load_volume_index())
   992: result = finish_restoring_volume_index(volume_index, readers, reader_count);    (in uds_load_volume_index())
   999: result = uds_check_guard_delta_lists(readers, reader_count);    (in uds_load_volume_index())

index-layout.c
   914: struct buffered_reader *readers[MAX_ZONES];    (in uds_load_index_state(), local)
   924: result = open_region_reader(layout, &isl->open_chapter, &readers[0]);    (in uds_load_index_state())
   928: result = uds_load_open_chapter(index, readers[0]);    (in uds_load_index_state())
   929: uds_free_buffered_reader(readers[0]);    (in uds_load_index_state())
   935: &readers[zone]);    (in uds_load_index_state())
   938: uds_free_buffered_reader(readers[zone - 1]);    (in uds_load_index_state())
   944: result = uds_load_volume_index(index->volume_index, readers, isl->zone_count);    (in uds_load_index_state())
   946: uds_free_buffered_reader(readers[zone]);    (in uds_load_index_state())
   950: result = open_region_reader(layout, &isl->index_page_map, &readers[0]);    (in uds_load_index_state())
   954: result = uds_read_index_page_map(index->volume->index_page_map, readers[0]);    (in uds_load_index_state())
   [all …]

/linux/Documentation/trace/

ring-buffer-map.rst
    43: Concurrent readers (either another application mapping that ring-buffer or the
    45: the ring-buffer and the output is unpredictable, just like concurrent readers on
