Search results for refs:npages (results 1 – 25 of 40), sorted by relevance

/dragonfly/sys/dev/drm/ttm/
ttm_page_alloc.c
80 unsigned npages; member
630 pool->npages += cpages; in ttm_page_pool_fill_locked()
660 count -= pool->npages; in ttm_page_pool_get_pages()
661 pool->npages = 0; in ttm_page_pool_get_pages()
669 pool->npages -= count; in ttm_page_pool_get_pages()
706 pool->npages++; in ttm_put_pages()
710 npages = 0; in ttm_put_pages()
712 npages = pool->npages - _manager->options.max_size; in ttm_put_pages()
719 if (npages) in ttm_put_pages()
765 npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages, 0); in ttm_get_pages()
[all …]
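
The ttm_put_pages() hits above follow a common pool-accounting shape: pool->npages counts the pages cached in the pool, and when it exceeds the configured maximum the surplus is computed and handed to the release path. A minimal standalone sketch of that shape, using hypothetical names (struct page_pool, pool_trim_count) rather than the real TTM structures:

    /* Hypothetical sketch of the "trim the pool above its cap" pattern. */
    struct page_pool {
        unsigned npages;      /* pages currently cached in the pool */
        unsigned max_size;    /* configured soft cap for the pool */
    };

    /* How many pages must be released to bring the pool back to max_size? */
    static unsigned
    pool_trim_count(const struct page_pool *pool)
    {
        if (pool->npages <= pool->max_size)
            return 0;                 /* under the cap, nothing to trim */
        return pool->npages - pool->max_size;
    }
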
ttm_page_alloc_dma.c
367 struct page *pages[], unsigned npages) in ttm_dma_pages_put() argument
379 if (npages && !(pool->type & IS_CACHED) && in ttm_dma_pages_put()
380 ttm_set_pages_array_wb(pages, npages)) in ttm_dma_pages_put()
382 pool->dev_name, npages); in ttm_dma_pages_put()
709 unsigned i, j, npages, cpages; in ttm_dma_pool_alloc_new_pages() local
757 npages = pool->size / PAGE_SIZE; in ttm_dma_pool_alloc_new_pages()
758 for (j = 0; j < npages; ++j) { in ttm_dma_pool_alloc_new_pages()
1002 unsigned count, i, npages = 0; in ttm_dma_unpopulate() local
1069 npages = pool->npages_free - _manager->options.max_size; in ttm_dma_unpopulate()
1080 if (npages) in ttm_dma_unpopulate()
[all …]
/dragonfly/lib/libu4bhid/
usage.c
54 static int npages, npagesmax; variable
62 for (i = 0; i < npages; i++) { in dump_hid_table()
125 if (npages >= npagesmax) { in hid_init()
139 curpage = &pages[npages++]; in hid_init()
166 for (k = 0; k < npages; k++) in hid_usage_page()
181 for (k = 0; k < npages; k++) in hid_usage_in_page()
184 if (k >= npages) in hid_usage_in_page()
210 for (k = 0; k < npages; k++) in hid_parse_usage_page()
228 for (k = 0; k < npages; k++) in hid_parse_usage_in_page()
/dragonfly/contrib/mdocml/
dba_read.c
45 int32_t im, ip, iv, npages; in dba_read() local
49 npages = dbm_page_count(); in dba_read()
50 dba = dba_new(npages < 128 ? 128 : npages); in dba_read()
51 for (ip = 0; ip < npages; ip++) { in dba_read()
dbm.c
69 static int32_t npages; variable
94 if ((npages = be32toh(*dbm_getint(4))) < 0) { in dbm_open()
96 fname, npages); in dbm_open()
139 return npages; in dbm_page_count()
151 assert(ip < npages); in dbm_page_get()
265 ip = npages; in page_bytitle()
273 while (ip < npages) { in page_bytitle()
289 if (ip == npages) { in page_bytitle()
303 if (++ip < npages) { in page_bytitle()
331 for ( ; ip < npages; ip++) in page_byarch()
/dragonfly/sys/dev/virtual/nvmm/
nvmm_netbsd.c
183 os_contigpa_zalloc(paddr_t *pa, vaddr_t *va, size_t npages) in os_contigpa_zalloc() argument
191 ret = uvm_pglistalloc(npages * PAGE_SIZE, 0, ~0UL, PAGE_SIZE, 0, in os_contigpa_zalloc()
196 _va = uvm_km_alloc(kernel_map, npages * PAGE_SIZE, 0, in os_contigpa_zalloc()
201 for (i = 0; i < npages; i++) { in os_contigpa_zalloc()
207 memset((void *)_va, 0, npages * PAGE_SIZE); in os_contigpa_zalloc()
214 for (i = 0; i < npages; i++) { in os_contigpa_zalloc()
221 os_contigpa_free(paddr_t pa, vaddr_t va, size_t npages) in os_contigpa_free() argument
225 pmap_kremove(va, npages * PAGE_SIZE); in os_contigpa_free()
227 uvm_km_free(kernel_map, va, npages * PAGE_SIZE, UVM_KMF_VAONLY); in os_contigpa_free()
228 for (i = 0; i < npages; i++) { in os_contigpa_free()
nvmm_dragonfly.c
252 os_contigpa_zalloc(paddr_t *pa, vaddr_t *va, size_t npages) in os_contigpa_zalloc() argument
256 addr = contigmalloc(npages * PAGE_SIZE, M_NVMM, M_WAITOK | M_ZERO, in os_contigpa_zalloc()
267 os_contigpa_free(paddr_t pa __unused, vaddr_t va, size_t npages) in os_contigpa_free() argument
269 contigfree((void *)va, npages * PAGE_SIZE, M_NVMM); in os_contigpa_free()
/dragonfly/sys/dev/drm/i915/
i915_gem_internal.c
53 unsigned int npages; in i915_gem_object_get_pages_internal() local
83 npages = obj->base.size / PAGE_SIZE; in i915_gem_object_get_pages_internal()
84 if (sg_alloc_table(st, npages, GFP_KERNEL)) { in i915_gem_object_get_pages_internal()
94 int order = min(fls(npages) - 1, max_order); in i915_gem_object_get_pages_internal()
112 npages -= 1 << order; in i915_gem_object_get_pages_internal()
113 if (!npages) { in i915_gem_object_get_pages_internal()
i915_gem_userptr.c
503 const int npages = obj->base.size >> PAGE_SHIFT;
510 pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_TEMPORARY);
521 while (pinned < npages) {
525 npages - pinned,
542 if (pinned == npages) {
544 npages);
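
The i915_gem_userptr.c hits above size a page-pointer array from npages and then pin user pages in batches until pinned reaches npages. A simplified userspace sketch of that loop shape; pin_batch() is a hypothetical stand-in for the real pinning call, not a kernel API:

    /* Hypothetical sketch of "pin pages in batches until all npages are pinned". */
    #include <stdlib.h>

    struct page;                                     /* opaque for the sketch */
    int pin_batch(struct page **slot, int want);     /* assumed helper: returns pages pinned, or <= 0 on failure */

    static struct page **
    pin_all_pages(int npages)
    {
        struct page **pvec = calloc(npages, sizeof(*pvec));
        int pinned = 0, got;

        if (pvec == NULL)
            return NULL;
        while (pinned < npages) {
            got = pin_batch(&pvec[pinned], npages - pinned);
            if (got <= 0) {
                free(pvec);                          /* real code would also unpin what it got */
                return NULL;
            }
            pinned += got;
        }
        return pvec;
    }
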
/dragonfly/sys/dev/drm/
linux_iomapping.c
52 imp->npages = size / PAGE_SIZE; in __ioremap_common()
80 paddr_end = imp->paddr + (imp->npages * PAGE_SIZE) - 1; in iounmap()
90 pmap_change_attr(imp->paddr, imp->npages, PAT_WRITE_BACK); in iounmap()
96 pmap_unmapdev((vm_offset_t)imp->pmap_addr, imp->npages * PAGE_SIZE); in iounmap()
linux_vmalloc.c
36 int npages; member
62 vmp->npages = count; in vmap()
79 size = vmp->npages * PAGE_SIZE; in vunmap()
81 pmap_qremove((vm_offset_t)addr, vmp->npages); in vunmap()
/dragonfly/usr.sbin/lpr/filters/
lpf.c
60 int npages = 1; variable
194 npages++; in main()
202 npages++; in main()
206 printf("%7.2f\t%s:%s\n", (float)npages, host, name); in main()
/dragonfly/sys/vfs/smbfs/
smbfs_io.c
402 int i, error, npages; in smbfs_getpages()
433 npages = btoc(count); in smbfs_getpages()
435 pmap_qenter(kva, pages, npages); in smbfs_getpages()
464 pmap_qremove(kva, npages); in smbfs_getpages()
470 for (i = 0; i < npages; i++) { in smbfs_getpages()
479 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) { in smbfs_getpages()
554 int i, npages, count; in smbfs_putpages() local
570 npages = btoc(count); in smbfs_putpages()
572 for (i = 0; i < npages; i++) { in smbfs_putpages()
578 pmap_qenter(kva, pages, npages); in smbfs_putpages()
[all …]
/dragonfly/sys/dev/virtual/virtio/balloon/
virtio_balloon.c
619 vtballoon_inflate(struct vtballoon_softc *sc, int npages) in vtballoon_inflate() argument
628 if (npages > VTBALLOON_PAGES_PER_REQUEST) in vtballoon_inflate()
629 npages = VTBALLOON_PAGES_PER_REQUEST; in vtballoon_inflate()
631 for (i = 0; i < npages; i++) { in vtballoon_inflate()
655 vtballoon_deflate(struct vtballoon_softc *sc, int npages) in vtballoon_deflate() argument
665 if (npages > VTBALLOON_PAGES_PER_REQUEST) in vtballoon_deflate()
666 npages = VTBALLOON_PAGES_PER_REQUEST; in vtballoon_deflate()
668 for (i = 0; i < npages; i++) { in vtballoon_deflate()
703 int npages) in vtballoon_send_page_frames() argument
713 npages * sizeof(uint32_t)); in vtballoon_send_page_frames()
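
vtballoon_inflate() and vtballoon_deflate() above clamp each request to VTBALLOON_PAGES_PER_REQUEST before looping, and vtballoon_send_page_frames() sizes its buffer as npages * sizeof(uint32_t), i.e. one 32-bit page frame number per page. A hedged sketch of that clamp-and-build step; PAGES_PER_REQUEST and the zeroed PFNs are placeholders, not the driver's values:

    /* Hypothetical sketch: clamp a balloon request and size its PFN array. */
    #include <stdint.h>
    #include <stdlib.h>

    #define PAGES_PER_REQUEST 256                    /* illustrative limit only */

    static uint32_t *
    balloon_build_request(int npages, int *out_npages)
    {
        uint32_t *pfns;
        int i;

        if (npages > PAGES_PER_REQUEST)
            npages = PAGES_PER_REQUEST;              /* handle one request's worth at a time */
        pfns = malloc(npages * sizeof(uint32_t));
        if (pfns == NULL)
            return NULL;
        for (i = 0; i < npages; i++)
            pfns[i] = 0;                             /* stand-in for a real page frame number */
        *out_npages = npages;
        return pfns;
    }
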
/dragonfly/lib/libc/db/hash/
hash_buf.c
297 int npages; in __buf_init() local
300 npages = (nbytes + hashp->BSIZE - 1) >> hashp->BSHIFT; in __buf_init()
301 npages = MAX(npages, MIN_BUFFERS); in __buf_init()
303 hashp->nbufs = npages; in __buf_init()
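
__buf_init() above turns a byte budget into a buffer-page count with a round-up shift and then enforces a floor. A small sketch of that computation; MIN_BUFFERS here is an illustrative floor, and bsize is assumed to be a power of two so the shift is a ceiling division:

    /* Hypothetical sketch: bytes -> pages, rounded up, with a minimum floor. */
    #define MIN_BUFFERS 16                           /* illustrative value only */

    static int
    bytes_to_bufpages(int nbytes, int bsize, int bshift)
    {
        int npages = (nbytes + bsize - 1) >> bshift; /* ceil(nbytes / bsize) */

        return npages > MIN_BUFFERS ? npages : MIN_BUFFERS;
    }
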
/dragonfly/sys/kern/
kern_xio.c
143 xio_init_pages(xio_t xio, struct vm_page **mbase, int npages, int xflags) in xio_init_pages() argument
147 KKASSERT(npages <= XIO_INTERNAL_PAGES); in xio_init_pages()
151 xio->xio_bytes = npages * PAGE_SIZE; in xio_init_pages()
153 xio->xio_npages = npages; in xio_init_pages()
155 for (i = 0; i < npages; ++i) { in xio_init_pages()
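
xio_init_pages() above asserts that npages fits the fixed internal page array, records the byte count as npages * PAGE_SIZE, and copies the page pointers. A userspace sketch of that initialization under assumed names and a hypothetical capacity:

    /* Hypothetical sketch of initializing a fixed-capacity page descriptor. */
    #include <assert.h>
    #include <stddef.h>

    #define SKETCH_PAGE_SIZE 4096
    #define INTERNAL_PAGES   32                      /* illustrative capacity */

    struct page;                                     /* opaque for the sketch */

    struct xio_desc {
        size_t       bytes;
        int          npages;
        struct page *pages[INTERNAL_PAGES];
    };

    static void
    xio_desc_init(struct xio_desc *xio, struct page **mbase, int npages)
    {
        int i;

        assert(npages <= INTERNAL_PAGES);            /* caller must respect the cap */
        xio->bytes = (size_t)npages * SKETCH_PAGE_SIZE;
        xio->npages = npages;
        for (i = 0; i < npages; ++i)
            xio->pages[i] = mbase[i];
    }
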
/dragonfly/lib/libc/db/mpool/
mpool.libtp
148 buf_fids[i].npages = -1;
253 if ( buf_fids[fid].npages == -1 ) {
254 /* initialize npages field */
259 *new_pageno = buf_fids[fid].npages;
264 buf_fids[fid].npages++;
560 if ( buf_fids[fid].npages == -1 ) {
561 /* Initialize the npages field */
640 buf_fids[obj->file_id].npages = obj->obj_id+1;
698 if ( buf_fids[fid].npages == -1 ) {
699 /* initialize npages field */
[all …]
mpool.c
86 mp->npages = sb.st_size / pagesize; in mpool_open()
115 if (mp->npages == MAX_PAGE_NUMBER) { in mpool_new()
130 mp->npages++; in mpool_new()
133 bp->pgno = *pgnoaddr = mp->npages++; in mpool_new()
457 fprintf(stderr, "%lu pages in the file\n", mp->npages); in mpool_stat()
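
mpool_open() above derives npages from the backing file's size divided by the page size, and mpool_new() hands out the next page number by post-incrementing it. A sketch of those two steps with assumed field and function names (mpool_hdr, mpool_hdr_init, mpool_hdr_newpage):

    /* Hypothetical sketch: derive a file's page count, then append new pages. */
    #include <sys/stat.h>

    struct mpool_hdr {
        unsigned long npages;                        /* pages currently in the file */
        unsigned long pagesize;
    };

    static int
    mpool_hdr_init(struct mpool_hdr *mp, int fd, unsigned long pagesize)
    {
        struct stat sb;

        if (fstat(fd, &sb) == -1)
            return -1;
        mp->pagesize = pagesize;
        mp->npages = (unsigned long)sb.st_size / pagesize;
        return 0;
    }

    /* Hand out the next page number at the end of the file. */
    static unsigned long
    mpool_hdr_newpage(struct mpool_hdr *mp)
    {
        return mp->npages++;
    }
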
/dragonfly/sys/vm/
swap_pager.c
293 swblk_t blk, int npages);
539 blk = blist_allocat(swapblist, npages, 0); in swp_pager_getswapspace()
551 npages); in swp_pager_getswapspace()
560 swapacctspace(blk, -npages); in swp_pager_getswapspace()
562 vm_swap_anon_use += npages; in swp_pager_getswapspace()
564 vm_swap_cache_use += npages; in swp_pager_getswapspace()
588 sp->sw_nused -= npages; in swp_pager_freeswapspace()
590 vm_swap_anon_use -= npages; in swp_pager_freeswapspace()
592 vm_swap_cache_use -= npages; in swp_pager_freeswapspace()
599 blist_free(swapblist, blk, npages); in swp_pager_freeswapspace()
[all …]
vm_zone.c
525 vm_pindex_t npages; in zget() local
595 npages = z->zpagecount - savezpc; in zget()
596 nitems = ((size_t)(savezpc + npages) * PAGE_SIZE - noffset) / in zget()
598 atomic_add_long(&zone_kmem_pages, npages); in zget()
604 for (i = 0; i < npages; ++i) { in zget()
617 for (i = npages; i < nalloc; ++i) { in zget()
device_pager.c
316 unsigned int npages; in old_dev_pager_ctor() local
328 npages = OFF_TO_IDX(size); in old_dev_pager_ctor()
329 for (off = foff; npages--; off += PAGE_SIZE) { in old_dev_pager_ctor()
vm_kern.c
111 vm_pindex_t npages; in kmem_alloc_swapbacked() local
114 npages = size / PAGE_SIZE; in kmem_alloc_swapbacked()
120 kp->object = vm_object_allocate(OBJT_DEFAULT, npages); in kmem_alloc_swapbacked()
/dragonfly/sys/dev/drm/radeon/
radeon_prime.c
48 int npages = bo->tbo.num_pages; in radeon_gem_prime_get_sg_table() local
50 return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); in radeon_gem_prime_get_sg_table()
/dragonfly/test/debug/
vnodeinfo.c
267 int npages = getobjpages(kd, vn.v_object); in dumpvp() local
269 if (npages || vnpsize) in dumpvp()
270 printf(" vmobjpgs=%d vnpsize=%d", npages, vnpsize); in dumpvp()
/dragonfly/contrib/gcc-4.7/gcc/
mips-tfile.c
4908 allocate_cluster (size_t npages) in allocate_cluster() argument
4921 allocate_cluster (size_t npages) in allocate_cluster() argument
4956 allocate_multiple_pages (size_t npages) in allocate_multiple_pages() argument
4965 if (npages <= pages_left) in allocate_multiple_pages()
4968 cluster_ptr += npages; in allocate_multiple_pages()
4969 pages_left -= npages; in allocate_multiple_pages()
4973 return allocate_cluster (npages); in allocate_multiple_pages()
4976 return xcalloc (npages, PAGE_SIZE); in allocate_multiple_pages()
4991 pages_left = npages; in free_multiple_pages()
4996 cluster_ptr -= npages; in free_multiple_pages()
[all …]
