// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks".  The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

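/*
 * Illustrative sketch (not part of this file): a z3fold pool is normally
 * reached through the generic zpool layer, roughly as follows.  The pool
 * name "my_pool" is made up for the example.
 *
 *	unsigned long handle;
 *	struct zpool *zp = zpool_create_pool("z3fold", "my_pool",
 *					     GFP_KERNEL, NULL);
 *	if (zp && zpool_malloc(zp, size, GFP_KERNEL, &handle) == 0) {
 *		void *obj = zpool_map_handle(zp, handle, ZPOOL_MM_RW);
 *		... copy the compressed data into obj ...
 *		zpool_unmap_handle(zp, handle);
 *		zpool_free(zp, handle);
 *	}
 *	zpool_destroy_pool(zp);
 */
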
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation.  It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which
 * is the maximum number of free chunks in a z3fold page; there will likewise
 * be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

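/*
 * Worked example, assuming PAGE_SIZE == 4096 (PAGE_SHIFT == 12) and a
 * z3fold header that fits in a single chunk:
 *	CHUNK_SHIFT  = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *	TOTAL_CHUNKS = 4096 >> 6 = 64
 *	ZHDR_CHUNKS  = 64 >> 6 = 1 (the header occupies the first chunk)
 *	NCHUNKS      = (4096 - 64) >> 6 = 63
 */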
#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

/*****************
 * Structures
*****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying the first chunks of
 *			each z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
};

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain at
 *		most two buddies; the list a z3fold page is added to depends
 *		on the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
*****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
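/*
 * Example (illustrative, CHUNK_SIZE == 64): size_to_chunks(100) rounds
 * 100 bytes up to (100 + 63) >> 6 == 2 chunks, so allocations are
 * always charged in whole chunks.
 */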

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_alloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}
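/*
 * Note (illustrative): struct z3fold_buddy_slots comes from a kmem cache
 * with SLOTS_ALIGN (0x40, i.e. 64-byte) alignment, so an encoded handle,
 * which points at one of the slot words inside the structure, can be
 * converted back to the structure base simply by masking off the low six
 * bits, as handle_to_slots() does above.
 */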

static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}
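/*
 * Encoding example (illustrative): the slot value combines the z3fold
 * page address (upper bits), the buddy index (low BUDDY_MASK bits) and,
 * for LAST only, the object size in chunks starting at BUDDY_SHIFT.
 * With first_num == 0 and last_chunks == 5, the LAST slot holds
 * (unsigned long)zhdr + 3 with (5 << 2) OR-ed in, i.e. zhdr | 0x17.
 * Note that the handle returned to callers is the address of the slot,
 * not the encoded value itself.
 */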

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	unsigned long addr = h;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)h;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}
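/*
 * Decoding example (illustrative): continuing the encoding example
 * above, a LAST slot holding zhdr | 0x17 decodes as
 *	handle_to_chunks() == (0x17 & ~PAGE_MASK) >> BUDDY_SHIFT == 5
 * which matches the last_chunks value that was encoded.
 */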

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}
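/*
 * Example (illustrative): with first_num == 1, FIRST is encoded at
 * index (FIRST + 1) & BUDDY_MASK == 2, and decoding computes
 * (2 - 1) & BUDDY_MASK == 1 == FIRST, so the round trip stays
 * consistent even after z3fold_compact_page() increments first_num.
 */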

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
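/*
 * Worked example (illustrative, TOTAL_CHUNKS == 64, ZHDR_CHUNKS == 1):
 * with first_chunks == 0, start_middle == 30, middle_chunks == 10 and
 * last_chunks == 4, nfree_before == 30 - 1 == 29 and nfree_after == 0,
 * so num_free_chunks() returns 29: only the largest contiguous gap can
 * host a new buddy, which is why max() is used rather than the sum.
 */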

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}
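/*
 * Example (illustrative): with start_middle == 30 and middle_chunks ==
 * 10, mchunk_memmove(zhdr, ZHDR_CHUNKS) copies the 10-chunk middle
 * object from byte offset 30 << CHUNK_SHIFT down to the first chunk
 * after the header; memmove() is used because the source and
 * destination ranges may overlap.
 */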

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
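/*
 * Compaction sketch (illustrative chunk map; H = header, F = first,
 * M = middle, . = free):
 *
 *	before:	|H|FFFF|......|MMM|.......|
 *	after:	|H|FFFF|MMM|..............|
 *
 * The middle object is only moved when doing so frees up at least
 * BIG_CHUNK_GAP chunks, since memmove() over up to a page of data is
 * comparatively expensive.
 */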

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_CLAIMED, &page->private) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not have been removed from its unbuddied
		 * list while the pool lock was held, and we have since taken
		 * the page lock, so kref_put could not have been called
		 * before we got here; it is therefore safe to just call
		 * kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	kfree(pool);
}

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request.  A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size cannot fit in a z3fold
 * page, or -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|middle|last_chunks to 0.  The page is actually freed
 * once all buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (page_claimed) {
		/* the page has not been claimed by us */
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		clear_bit(PAGE_CLAIMED, &page->private);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		clear_bit(PAGE_CLAIMED, &page->private);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	struct z3fold_buddy_slots slots;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				page = NULL;
				continue;
			}

			if (unlikely(PageIsolated(page))) {
				clear_bit(PAGE_CLAIMED, &page->private);
				page = NULL;
				continue;
			}
			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			if (!z3fold_page_trylock(zhdr)) {
				clear_bit(PAGE_CLAIMED, &page->private);
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free()
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = __encode_handle(zhdr, &slots,
								FIRST);
			if (zhdr->middle_chunks)
				middle_handle = __encode_handle(zhdr, &slots,
								MIDDLE);
			if (zhdr->last_chunks)
				last_handle = __encode_handle(zhdr, &slots,
								LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = __encode_handle(zhdr, &slots, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
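/*
 * Illustrative sketch of an eviction handler honoring the contract
 * described above (my_evict and writeback_object are hypothetical
 * names, not part of this file):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		if (writeback_object(pool, handle) != 0)
 *			return -EAGAIN;	(the page goes back on the LRU)
 *		z3fold_free(pool, handle);	(required on success)
 *		return 0;
 *	}
 *
 * The zpool layer wires such a handler up through z3fold_zpool_evict()
 * below.
 */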

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
	z3fold_page_unlock(zhdr);
out:
	return addr;
}
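/*
 * Address math example (illustrative, PAGE_SIZE == 4096, CHUNK_SHIFT ==
 * 6): a LAST buddy with handle_to_chunks() == 5 maps at page address +
 * 4096 - (5 << 6), i.e. the final 320 bytes of the page, while FIRST
 * objects always start right after the header at ZHDR_SIZE_ALIGNED.
 */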

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private) ||
	    test_bit(PAGE_CLAIMED, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	pool = zhdr_to_pool(zhdr);

	if (zhdr->mapped_count == 0) {
		kref_get(&zhdr->refcount);
		if (!list_empty(&zhdr->buddy))
			list_del_init(&zhdr->buddy);
		spin_lock(&pool->lock);
		if (!list_empty(&page->lru))
			list_del(&page->lru);
		spin_unlock(&pool->lock);
		z3fold_page_unlock(zhdr);
		return true;
	}
out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr)) {
		return -EAGAIN;
	}
	if (zhdr->mapped_count != 0) {
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");