xref: /dragonfly/sys/kern/kern_sfbuf.c (revision 9bb2a92d)
1 /*
2  * Copyright (c) 1998 David Greenman.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  *
25  * $DragonFly: src/sys/kern/kern_sfbuf.c,v 1.1 2003/12/10 23:48:07 hsu Exp $
26  */
27 
28 #include <sys/param.h>
29 #include <sys/types.h>
30 #include <sys/systm.h>
31 #include <sys/kernel.h>
32 #include <sys/malloc.h>
33 #include <sys/queue.h>
34 #include <sys/sfbuf.h>
35 #include <vm/vm.h>
36 #include <vm/vm_extern.h>
37 #include <vm/vm_kern.h>
38 #include <vm/vm_page.h>
39 #include <vm/pmap.h>
40 
/*
 * Forward declaration so the SYSINIT below can reference the init routine;
 * the pool is set up once at boot, after mbuf initialization (SI_SUB_MBUF),
 * with no particular ordering within that subsystem (SI_ORDER_ANY).
 */
static void sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL)
43 
LIST_HEAD(sf_buf_list, sf_buf);

/*
 * A hash table of active sendfile(2) buffers
 */
static struct sf_buf_list *sf_buf_hashtable;
static u_long sf_buf_hashmask;		/* hashinit() sizes this to nsfbufs */

/*
 * Hash a vm_page to a chain by its index within vm_page_array.
 * Valid only for pages that are members of vm_page_array.
 */
#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

/*
 * Free sf_bufs, in LRU order.  A buffer on the freelist may still carry a
 * cached page mapping (sf->m != NULL) that sf_buf_alloc() can reclaim.
 */
static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;		/* count of sleepers in sf_buf_alloc() */

static vm_offset_t sf_base;		/* base KVA of the mapping window */
static struct sf_buf *sf_bufs;		/* array of nsfbufs sf_buf headers */
60 /*
61  * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
62  */
63 static void
64 sf_buf_init(void *arg)
65 {
66 	int i;
67 
68 	sf_buf_hashtable = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
69 	TAILQ_INIT(&sf_buf_freelist);
70 	sf_base = kmem_alloc_pageable(kernel_map, nsfbufs * PAGE_SIZE);
71 	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
72 	    M_NOWAIT | M_ZERO);
73 	for (i = 0; i < nsfbufs; i++) {
74 		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
75 		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
76 	}
77 }
78 
79 /*
80  * Get an sf_buf from the freelist. Will block if none are available.
81  */
/*
 * Get an sf_buf from the freelist. Will block if none are available.
 *
 * Returns a buffer with the page mapped at sf->kva and refcnt bumped,
 * or NULL if the sleep was interrupted by a signal (PCATCH).
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m)
{
	struct sf_buf_list *hash_chain;
	struct sf_buf *sf;
	int s;
	int error;

	s = splimp();
	/*
	 * First check whether this page already has a (possibly cached)
	 * mapping.  Freed buffers keep their page and hash entry until
	 * reused, so a zero-refcnt hit can be reclaimed without remapping.
	 */
	hash_chain = &sf_buf_hashtable[SF_BUF_HASH(m)];
	LIST_FOREACH(sf, hash_chain, list_entry) {
		if (sf->m == m) {
			if (sf->refcnt == 0) {
				/* reclaim cached entry off freelist */
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
			}
			++sf->refcnt;
			goto done;	/* found existing mapping */
		}
	}

	/*
	 * Didn't find old mapping.  Get a buffer off the freelist.
	 */
	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		++sf_buf_alloc_want;
		error = tsleep(&sf_buf_freelist, PCATCH, "sfbufa", 0);
		--sf_buf_alloc_want;

		/* If we got a signal, don't risk going back to sleep. */
		if (error)
			goto done;	/* sf is NULL here */
	}
	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);

	/*
	 * Steal the buffer: drop its stale hash entry (if any), enter it
	 * on this page's chain, and map the new page at the buffer's KVA.
	 */
	if (sf->m != NULL)	/* remove previous mapping from hash table */
		LIST_REMOVE(sf, list_entry);
	LIST_INSERT_HEAD(hash_chain, sf, list_entry);
	sf->refcnt = 1;
	sf->m = m;
	pmap_qenter(sf->kva, &sf->m, 1);
done:
	splx(s);
	return (sf);
}
127 
128 #define dtosf(x)	(&sf_bufs[((uintptr_t)(x) - (uintptr_t)sf_base) >> PAGE_SHIFT])
129 
130 void
131 sf_buf_ref(caddr_t addr, u_int size)
132 {
133 	struct sf_buf *sf;
134 
135 	sf = dtosf(addr);
136 	if (sf->refcnt == 0)
137 		panic("sf_buf_ref: referencing a free sf_buf");
138 	sf->refcnt++;
139 }
140 
141 /*
142  * Lose a reference to an sf_buf. When none left, detach mapped page
143  * and release resources back to the system.
144  *
145  * Must be called at splimp.
146  */
/*
 * Lose a reference to an sf_buf. When none left, detach mapped page
 * and release resources back to the system.
 *
 * Must be called at splimp.
 *
 * Note the buffer's page mapping and hash entry are left intact when it
 * goes back on the freelist, so sf_buf_alloc() can reclaim it cheaply.
 */
void
sf_buf_free(caddr_t addr, u_int size)
{
	struct sf_buf *sf;
	struct vm_page *m;
	int s;

	sf = dtosf(addr);
	if (sf->refcnt == 0)
		panic("sf_buf_free: freeing free sf_buf");
	sf->refcnt--;
	if (sf->refcnt == 0) {
		m = sf->m;
		s = splvm();	/* protect the unwire/free pair */
		vm_page_unwire(m, 0);
		/*
		 * Check for the object going away on us. This can
		 * happen since we don't hold a reference to it.
		 * If so, we're responsible for freeing the page.
		 */
		if (m->wire_count == 0 && m->object == NULL)
			vm_page_free(m);
		splx(s);

		/* Tail insertion keeps the freelist in LRU order. */
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		if (sf_buf_alloc_want > 0)
			wakeup_one(&sf_buf_freelist);
	}
}
176