/*
 * Coverity Scan model
 * https://scan.coverity.com/models
 *
 * This is a modeling file for Coverity Scan.
 * Modeling helps to avoid false positives.
 *
 * - Modeling doesn't need full structs and typedefs. Rudimentary structs
 *   and similar types are sufficient.
 * - An uninitialized local pointer is not an error. It signifies that the
 *   variable could be either NULL or have some data.
 *
 * Coverity Scan doesn't pick up modifications automatically. The model file
 * must be uploaded by an admin in the analysis settings.
 *
 * Some of this was initially cribbed from:
 *
 * https://github.com/kees/coverity-linux/blob/trunk/model.c
 *
 * The model below is based on the original model written by Brian Behlendorf
 * for the original zfsonlinux/zfs repository. Some inspiration was taken from
 * kees/coverity-linux, specifically involving memory copies.
 */
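
/*
 * Illustration (not part of the model, and the caller below is hypothetical):
 * the allocator models in this file tell Coverity which calls may fail,
 * which may sleep, and which free function releases each buffer. Given
 *
 *	char *p = umem_alloc(len, UMEM_DEFAULT);
 *	p[0] = '\0';
 *
 * the analyzer sees that umem_alloc() can return NULL for UMEM_DEFAULT and
 * can report the unchecked dereference, while the same code using
 * UMEM_NOFAIL is modeled as never returning NULL and is left alone.
 */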

#include <stdarg.h>

#define	KM_NOSLEEP		0x0001  /* cannot block for memory; may fail */

#define	UMEM_DEFAULT		0x0000  /* normal -- may fail */
#define	UMEM_NOFAIL		0x0100  /* Never fails */

#define	NULL	(0)

typedef enum {
	B_FALSE = 0,
	B_TRUE = 1
} boolean_t;

typedef unsigned int uint_t;

/* Unconstrained values used to model outcomes that may or may not happen. */
int condition0, condition1;

int
ddi_copyin(const void *from, void *to, size_t len, int flags)
{
	(void) flags;
	__coverity_negative_sink__(len);
	__coverity_tainted_data_argument__(from);
	__coverity_tainted_data_argument__(to);
	__coverity_writeall__(to);
}

void *
memset(void *dst, int c, size_t len)
{
	__coverity_negative_sink__(len);
	if (c == 0)
		__coverity_writeall0__(dst);
	else
		__coverity_writeall__(dst);
	return (dst);
}

void *
memmove(void *dst, void *src, size_t len)
{
	/*
	 * Read the first and last bytes of src so Coverity treats the whole
	 * source range as read and can flag overruns or uninitialized data.
	 */
	int first = ((char *)src)[0];
	int last = ((char *)src)[len-1];

	__coverity_negative_sink__(len);
	__coverity_writeall__(dst);
	return (dst);
}

void *
memcpy(void *dst, void *src, size_t len)
{
	/* As in memmove(), force reads of the first and last source bytes. */
	int first = ((char *)src)[0];
	int last = ((char *)src)[len-1];

	__coverity_negative_sink__(len);
	__coverity_writeall__(dst);
	return (dst);
}

void *
umem_alloc_aligned(size_t size, size_t align, int kmflags)
{
	__coverity_negative_sink__(size);
	__coverity_negative_sink__(align);

	if (((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL) || condition0) {
		void *buf = __coverity_alloc__(size);
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "umem_free");
		return (buf);
	}

	return (NULL);
}

void *
umem_alloc(size_t size, int kmflags)
{
	__coverity_negative_sink__(size);

	if (((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL) || condition0) {
		void *buf = __coverity_alloc__(size);
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "umem_free");
		return (buf);
	}

	return (NULL);
}

void *
umem_zalloc(size_t size, int kmflags)
{
	__coverity_negative_sink__(size);

	if (((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL) || condition0) {
		void *buf = __coverity_alloc__(size);
		__coverity_writeall0__(buf);
		__coverity_mark_as_afm_allocated__(buf, "umem_free");
		return (buf);
	}

	return (NULL);
}

void
umem_free(void *buf, size_t size)
{
	__coverity_negative_sink__(size);
	__coverity_free__(buf);
}
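
/*
 * Illustration (hypothetical caller, not part of the model): because the
 * umem allocators above are marked with __coverity_mark_as_afm_allocated__
 * paired with "umem_free", Coverity tracks each allocation as a resource.
 * In a sketch such as
 *
 *	void *buf = umem_alloc(sz, UMEM_NOFAIL);
 *	if (do_work(buf) != 0)
 *		return (-1);
 *	umem_free(buf, sz);
 *
 * the early-return path can be reported as a resource leak, and the
 * uninitialized-buffer marking lets reads of the buffer before any write be
 * flagged as well.
 */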

typedef struct {} umem_cache_t;

void *
umem_cache_alloc(umem_cache_t *skc, int flags)
{
	(void) skc;

	if (condition1)
		__coverity_sleep__();

	if (((UMEM_NOFAIL & flags) == UMEM_NOFAIL) || condition0) {
		void *buf = __coverity_alloc_nosize__();
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "umem_cache_free");
		return (buf);
	}

	return (NULL);
}

void
umem_cache_free(umem_cache_t *skc, void *obj)
{
	(void) skc;

	__coverity_free__(obj);
}

void *
spl_kmem_alloc(size_t sz, int fl, const char *func, int line)
{
	(void) func;
	(void) line;

	__coverity_negative_sink__(sz);

	if (condition1)
		__coverity_sleep__();

	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
		void *buf = __coverity_alloc__(sz);
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_kmem_free");
		return (buf);
	}

	return (NULL);
}

void *
spl_kmem_zalloc(size_t sz, int fl, const char *func, int line)
{
	(void) func;
	(void) line;

	__coverity_negative_sink__(sz);

	if (condition1)
		__coverity_sleep__();

	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
		void *buf = __coverity_alloc__(sz);
		__coverity_writeall0__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_kmem_free");
		return (buf);
	}

	return (NULL);
}

void
spl_kmem_free(const void *ptr, size_t sz)
{
	__coverity_negative_sink__(sz);
	__coverity_free__(ptr);
}
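
/*
 * Illustration (not part of the model): the condition1-guarded
 * __coverity_sleep__() calls above mark these allocators as potentially
 * blocking, and the flag checks make only KM_NOSLEEP requests able to fail.
 * In a hypothetical caller such as
 *
 *	void *p = spl_kmem_alloc(sz, KM_NOSLEEP, __func__, __LINE__);
 *	memset(p, 0, sz);
 *
 * the possible NULL return can be reported as an unchecked dereference,
 * while allocations without KM_NOSLEEP are modeled as always succeeding.
 */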

char *
kmem_vasprintf(const char *fmt, va_list ap)
{
	char *buf = __coverity_alloc_nosize__();
	(void) ap;

	__coverity_string_null_sink__(fmt);
	__coverity_string_size_sink__(fmt);

	__coverity_writeall__(buf);

	__coverity_mark_as_afm_allocated__(buf, "kmem_strfree");

	return (buf);
}

char *
kmem_asprintf(const char *fmt, ...)
{
	char *buf = __coverity_alloc_nosize__();

	__coverity_string_null_sink__(fmt);
	__coverity_string_size_sink__(fmt);

	__coverity_writeall__(buf);

	__coverity_mark_as_afm_allocated__(buf, "kmem_strfree");

	return (buf);
}
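
/*
 * Illustration (not part of the model): __coverity_string_null_sink__() and
 * __coverity_string_size_sink__() tell the analyzer that fmt must be a
 * valid NUL-terminated string. A hypothetical call such as
 *
 *	char *msg = kmem_asprintf(maybe_null_fmt);
 *
 * can therefore be reported if maybe_null_fmt may be NULL, and the result
 * is tracked until it is released with kmem_strfree().
 */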

char *
kmem_strdup(const char *str)
{
	char *buf = __coverity_alloc_nosize__();

	__coverity_string_null_sink__(str);
	__coverity_string_size_sink__(str);

	__coverity_writeall__(buf);

	__coverity_mark_as_afm_allocated__(buf, "kmem_strfree");

	return (buf);
}

void
kmem_strfree(char *str)
{
	__coverity_free__(str);
}

void *
spl_vmem_alloc(size_t sz, int fl, const char *func, int line)
{
	(void) func;
	(void) line;

	__coverity_negative_sink__(sz);

	if (condition1)
		__coverity_sleep__();

	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
		void *buf = __coverity_alloc__(sz);
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_vmem_free");
		return (buf);
	}

	return (NULL);
}

void *
spl_vmem_zalloc(size_t sz, int fl, const char *func, int line)
{
	(void) func;
	(void) line;

	__coverity_negative_sink__(sz);

	if (condition1)
		__coverity_sleep__();

	if (((fl & KM_NOSLEEP) != KM_NOSLEEP) || condition0) {
		void *buf = __coverity_alloc__(sz);
		__coverity_writeall0__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_vmem_free");
		return (buf);
	}

	return (NULL);
}

void
spl_vmem_free(const void *ptr, size_t sz)
{
	__coverity_negative_sink__(sz);
	__coverity_free__(ptr);
}

typedef struct {} spl_kmem_cache_t;

void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	(void) skc;

	if (condition1)
		__coverity_sleep__();

	if ((flags == 0) || condition0) {
		void *buf = __coverity_alloc_nosize__();
		__coverity_mark_as_uninitialized_buffer__(buf);
		__coverity_mark_as_afm_allocated__(buf, "spl_kmem_cache_free");
		return (buf);
	}

	return (NULL);
}

void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	(void) skc;

	__coverity_free__(obj);
}

typedef struct {} zfsvfs_t;

int
zfsvfs_create(const char *osname, boolean_t readonly, zfsvfs_t **zfvp)
{
	(void) osname;
	(void) readonly;

	if (condition1)
		__coverity_sleep__();

	if (condition0) {
		*zfvp = __coverity_alloc_nosize__();
		__coverity_writeall__(*zfvp);
		return (0);
	}

	return (1);
}

void
zfsvfs_free(zfsvfs_t *zfsvfs)
{
	__coverity_free__(zfsvfs);
}
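
/*
 * Illustration (not part of the model): zfsvfs_create() follows the
 * status-plus-out-parameter pattern, so the model only writes *zfvp on the
 * success path. A hypothetical caller
 *
 *	zfsvfs_t *zfvp;
 *	if (zfsvfs_create(osname, B_FALSE, &zfvp) != 0)
 *		return (error);
 *	use(zfvp);
 *
 * is therefore analyzed correctly: using zfvp without checking the return
 * value can be flagged as a use of an uninitialized pointer.
 */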

typedef struct {} nvlist_t;

int
nvlist_alloc(nvlist_t **nvlp, uint_t nvflag, int kmflag)
{
	(void) nvflag;

	if (condition1)
		__coverity_sleep__();

	if ((kmflag == 0) || condition0) {
		*nvlp = __coverity_alloc_nosize__();
		__coverity_mark_as_afm_allocated__(*nvlp, "nvlist_free");
		__coverity_writeall__(*nvlp);
		return (0);
	}

	return (-1);
}

int
nvlist_dup(const nvlist_t *nvl, nvlist_t **nvlp, int kmflag)
{
	/* Force a read of *nvl so NULL or uninitialized sources are caught. */
	nvlist_t read = *nvl;

	if (condition1)
		__coverity_sleep__();

	if ((kmflag == 0) || condition0) {
		nvlist_t *copy = __coverity_alloc_nosize__();
		__coverity_mark_as_afm_allocated__(copy, "nvlist_free");
		__coverity_writeall__(copy);
		*nvlp = copy;
		return (0);
	}

	return (-1);
}

void
nvlist_free(nvlist_t *nvl)
{
	__coverity_free__(nvl);
}

int
nvlist_pack(nvlist_t *nvl, char **bufp, size_t *buflen, int encoding,
    int kmflag)
{
	(void) nvl;
	(void) encoding;

	if (*bufp == NULL) {
		if (condition1)
			__coverity_sleep__();

		if ((kmflag == 0) || condition0) {
			char *buf = __coverity_alloc_nosize__();
			__coverity_writeall__(buf);
			/*
			 * We cannot use __coverity_mark_as_afm_allocated__()
			 * because the free function varies between the kernel
			 * and userspace.
			 */
			*bufp = buf;
			return (0);
		}

		return (-1);
	}

	/*
	 * Unfortunately, errors from the buffer being too small are not
	 * possible to model, so we assume success.
	 */
	__coverity_negative_sink__(*buflen);
	__coverity_writeall__(*bufp);
	return (0);
}

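/*
 * Illustration (not part of the model): nvlist_pack() supports two caller
 * conventions, and the model keeps both visible to the analyzer. A
 * hypothetical caller letting the function allocate is
 *
 *	char *packed = NULL;
 *	size_t len = 0;
 *	if (nvlist_pack(nvl, &packed, &len, 0, 0) == 0)
 *		use packed, then release it with the appropriate free
 *		function for the kernel or userspace build
 *
 * while passing a preallocated buffer exercises the second branch, where
 * the model simply marks the buffer as fully written.
 */
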
int
nvlist_unpack(char *buf, size_t buflen, nvlist_t **nvlp, int kmflag)
{
	__coverity_negative_sink__(buflen);

	if (condition1)
		__coverity_sleep__();

	if ((kmflag == 0) || condition0) {
		nvlist_t *nvl = __coverity_alloc_nosize__();
		__coverity_mark_as_afm_allocated__(nvl, "nvlist_free");
		__coverity_writeall__(nvl);
		*nvlp = nvl;
		/* Force reads of the first and last input bytes. */
		int first = buf[0];
		int last = buf[buflen-1];
		return (0);
	}

	return (-1);
}

void *
malloc(size_t size)
{
	void *buf = __coverity_alloc__(size);

	if (condition1)
		__coverity_sleep__();

	__coverity_negative_sink__(size);
	__coverity_mark_as_uninitialized_buffer__(buf);
	__coverity_mark_as_afm_allocated__(buf, "free");

	return (buf);
}

void *
calloc(size_t nmemb, size_t size)
{
	void *buf = __coverity_alloc__(size * nmemb);

	if (condition1)
		__coverity_sleep__();

	__coverity_negative_sink__(size);
	__coverity_writeall0__(buf);
	__coverity_mark_as_afm_allocated__(buf, "free");
	return (buf);
}

void
free(void *buf)
{
	__coverity_free__(buf);
}
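
/*
 * Illustration (not part of the model): the libc models mirror the kernel
 * and umem ones so userspace tools get the same treatment. For example, in
 * the hypothetical snippet
 *
 *	char *p = malloc(n);
 *	return (0);
 *
 * the pointer is tracked as allocated by malloc() and released by free(),
 * so the missing free() can be reported as a leak, and reads of the buffer
 * before any write can be reported because it is marked uninitialized.
 */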

int
sched_yield(void)
{
	__coverity_sleep__();
}

typedef struct {} kmutex_t;
typedef struct {} krwlock_t;
typedef int krw_t;

/*
 * Coverity reportedly does not support macros, so this only works for
 * userspace.
 */

void
mutex_enter(kmutex_t *mp)
{
	if (condition0)
		__coverity_sleep__();

	__coverity_exclusive_lock_acquire__(mp);
}

int
mutex_tryenter(kmutex_t *mp)
{
	if (condition0) {
		__coverity_exclusive_lock_acquire__(mp);
		return (1);
	}

	return (0);
}

void
mutex_exit(kmutex_t *mp)
{
	__coverity_exclusive_lock_release__(mp);
}

void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
	(void) rw;

	if (condition0)
		__coverity_sleep__();

	__coverity_recursive_lock_acquire__(rwlp);
}

void
rw_exit(krwlock_t *rwlp)
{
	__coverity_recursive_lock_release__(rwlp);
}

int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
	(void) rw;

	if (condition0) {
		__coverity_recursive_lock_acquire__(rwlp);
		return (1);
	}

	return (0);
}
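
/*
 * Illustration (not part of the model): modeling the lock primitives lets
 * Coverity's lock checkers pair acquisitions with releases. In a
 * hypothetical function such as
 *
 *	mutex_enter(&lock);
 *	if (error)
 *		return (error);
 *	mutex_exit(&lock);
 *
 * the early return with the mutex still held can be reported as a missing
 * unlock, and double releases can be reported as well.
 */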

/* Thus, we fall back to modeling the Linux kernel locks. */
struct mutex {};
struct rw_semaphore {};

void
mutex_lock(struct mutex *lock)
{
	if (condition0) {
		__coverity_sleep__();
	}
	__coverity_exclusive_lock_acquire__(lock);
}

void
mutex_unlock(struct mutex *lock)
{
	__coverity_exclusive_lock_release__(lock);
}

void
down_read(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_sleep__();
	}
	__coverity_recursive_lock_acquire__(sem);
}

void
down_write(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_sleep__();
	}
	__coverity_recursive_lock_acquire__(sem);
}

int
down_read_trylock(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(sem);
		return (1);
	}

	return (0);
}

int
down_write_trylock(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(sem);
		return (1);
	}

	return (0);
}

void
up_read(struct rw_semaphore *sem)
{
	__coverity_recursive_lock_release__(sem);
}

void
up_write(struct rw_semaphore *sem)
{
	__coverity_recursive_lock_release__(sem);
}

int
__cond_resched(void)
{
	if (condition0) {
		__coverity_sleep__();
	}
}