// SPDX-License-Identifier: GPL-2.0
#include <linux/platform_device.h>
#include <linux/memregion.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/dax.h>
#include "../bus.h"

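/*
 * When true, newly created dax regions are left idle (zero-sized and
 * not handed to the dax_kmem driver) so that an administrator can
 * partition the range before any memory is onlined.
 */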
static bool region_idle;
module_param_named(region_idle, region_idle, bool, 0644);

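/*
 * Probe an "hmem" platform device: allocate a dax_region for the range
 * described by the platform data and create a dev_dax instance for it.
 * Unless @region_idle is set, the device is sized to the full range and
 * defaults to the dax_kmem (memory hotplug) personality.
 */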
static int dax_hmem_probe(struct platform_device *pdev)
{
	unsigned long flags = IORESOURCE_DAX_KMEM;
	struct device *dev = &pdev->dev;
	struct dax_region *dax_region;
	struct memregion_info *mri;
	struct dev_dax_data data;

	/*
	 * @region_idle == true indicates that an administrative agent
	 * wants to manipulate the range partitioning before the devices
	 * are created, so do not send them to the dax_kmem driver by
	 * default.
	 */
	if (region_idle)
		flags = 0;

	mri = dev->platform_data;
	dax_region = alloc_dax_region(dev, pdev->id, &mri->range,
				      mri->target_node, PMD_SIZE, flags);
	if (!dax_region)
		return -ENOMEM;

	data = (struct dev_dax_data) {
		.dax_region = dax_region,
		.id = -1,
		.size = region_idle ? 0 : range_len(&mri->range),
		.memmap_on_memory = false,
	};

	return PTR_ERR_OR_ZERO(devm_create_dev_dax(&data));
}

static struct platform_driver dax_hmem_driver = {
	.probe = dax_hmem_probe,
	.driver = {
		.name = "hmem",
	},
};

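/* devm release callbacks for the memregion id and the "hmem" device */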
static void release_memregion(void *data)
{
	memregion_free((long) data);
}

static void release_hmem(void *pdev)
{
	platform_device_unregister(pdev);
}

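/*
 * Register an "hmem" platform device for a Soft Reserved memory range.
 * Ranges that intersect a CXL window are deferred to the CXL core, and
 * ranges that are not marked IORES_DESC_SOFT_RESERVED are ignored.
 */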
static int hmem_register_device(struct device *host, int target_nid,
				const struct resource *res)
{
	struct platform_device *pdev;
	struct memregion_info info;
	long id;
	int rc;

	if (IS_ENABLED(CONFIG_CXL_REGION) &&
	    region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
			      IORES_DESC_CXL) != REGION_DISJOINT) {
		dev_dbg(host, "deferring range to CXL: %pr\n", res);
		return 0;
	}

	rc = region_intersects(res->start, resource_size(res), IORESOURCE_MEM,
			       IORES_DESC_SOFT_RESERVED);
	if (rc != REGION_INTERSECTS)
		return 0;

	id = memregion_alloc(GFP_KERNEL);
	if (id < 0) {
		dev_err(host, "memregion allocation failure for %pr\n", res);
		return -ENOMEM;
	}
	rc = devm_add_action_or_reset(host, release_memregion, (void *) id);
	if (rc)
		return rc;

	pdev = platform_device_alloc("hmem", id);
	if (!pdev) {
		dev_err(host, "device allocation failure for %pr\n", res);
		return -ENOMEM;
	}

	pdev->dev.numa_node = numa_map_to_online_node(target_nid);
	info = (struct memregion_info) {
		.target_node = target_nid,
		.range = {
			.start = res->start,
			.end = res->end,
		},
	};
	rc = platform_device_add_data(pdev, &info, sizeof(info));
	if (rc < 0) {
		dev_err(host, "memregion_info allocation failure for %pr\n",
			res);
		goto out_put;
	}

	rc = platform_device_add(pdev);
	if (rc < 0) {
		dev_err(host, "%s add failed for %pr\n", dev_name(&pdev->dev),
			res);
		goto out_put;
	}

	return devm_add_action_or_reset(host, release_hmem, pdev);

out_put:
	platform_device_put(pdev);
	return rc;
}

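/*
 * The "hmem_platform" device enumerates the platform's Soft Reserved
 * ranges; register an "hmem" device for each one.
 */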
static int dax_hmem_platform_probe(struct platform_device *pdev)
{
	return walk_hmem_resources(&pdev->dev, hmem_register_device);
}

static struct platform_driver dax_hmem_platform_driver = {
	.probe = dax_hmem_platform_probe,
	.driver = {
		.name = "hmem_platform",
	},
};

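/*
 * Register the "hmem_platform" driver (range enumeration) and the
 * "hmem" driver (dev_dax creation); unwind the former if the latter
 * fails to register.
 */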
static __init int dax_hmem_init(void)
{
	int rc;

	rc = platform_driver_register(&dax_hmem_platform_driver);
	if (rc)
		return rc;

	rc = platform_driver_register(&dax_hmem_driver);
	if (rc)
		platform_driver_unregister(&dax_hmem_platform_driver);

	return rc;
}

static __exit void dax_hmem_exit(void)
{
	platform_driver_unregister(&dax_hmem_driver);
	platform_driver_unregister(&dax_hmem_platform_driver);
}

module_init(dax_hmem_init);
module_exit(dax_hmem_exit);

/* Allow for CXL to define its own dax regions */
#if IS_ENABLED(CONFIG_CXL_REGION)
#if IS_MODULE(CONFIG_CXL_ACPI)
MODULE_SOFTDEP("pre: cxl_acpi");
#endif
#endif

MODULE_ALIAS("platform:hmem*");
MODULE_ALIAS("platform:hmem_platform*");
MODULE_DESCRIPTION("HMEM DAX: direct access to 'specific purpose' memory");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");