xref: /linux/tools/testing/cxl/test/mem.c (revision 021bc4b9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright(c) 2021 Intel Corporation. All rights reserved.
3 
4 #include <linux/platform_device.h>
5 #include <linux/mod_devicetable.h>
6 #include <linux/module.h>
7 #include <linux/delay.h>
8 #include <linux/sizes.h>
9 #include <linux/bits.h>
10 #include <asm/unaligned.h>
11 #include <crypto/sha2.h>
12 #include <cxlmem.h>
13 
14 #include "trace.h"
15 
16 #define LSA_SIZE SZ_128K
17 #define FW_SIZE SZ_64M
18 #define FW_SLOTS 3
19 #define DEV_SIZE SZ_2G
/* Build a Command Effects bit; argument parenthesized to avoid precedence bugs */
#define EFFECT(x) (1U << (x))
21 
22 #define MOCK_INJECT_DEV_MAX 8
23 #define MOCK_INJECT_TEST_MAX 128
24 
/* Per-device cap on outstanding injected-poison records (mock policy knob) */
static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
26 
/*
 * Bit positions within a CEL entry's Command Effects field; combined via
 * EFFECT() when populating mock_cel[] below.
 */
enum cxl_command_effects {
	CONF_CHANGE_COLD_RESET = 0,
	CONF_CHANGE_IMMEDIATE,
	DATA_CHANGE_IMMEDIATE,
	POLICY_CHANGE_IMMEDIATE,
	LOG_CHANGE_IMMEDIATE,
	SECURITY_CHANGE_IMMEDIATE,
	BACKGROUND_OP,
	SECONDARY_MBOX_SUPPORTED,
};

/* A command that claims no side effects at all */
#define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
39 
/*
 * Mock Command Effects Log: the mailbox opcodes this emulated device
 * advertises, each paired with the effects it claims to have.
 */
static struct cxl_cel_entry mock_cel[] = {
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
				      EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
		.effect = CXL_CMD_EFFECT_NONE,
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(BACKGROUND_OP)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
		.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
				      EFFECT(CONF_CHANGE_IMMEDIATE)),
	},
	{
		.opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE),
		.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) |
				      EFFECT(SECURITY_CHANGE_IMMEDIATE) |
				      EFFECT(BACKGROUND_OP)),
	},
};
99 
/* See CXL 2.0 Table 181 Get Health Info Output Payload */
struct cxl_mbox_health_info {
	u8 health_status;	/* overall health flags */
	u8 media_status;	/* media state */
	u8 ext_status;		/* extended status bit-pairs */
	u8 life_used;		/* life used, percent */
	__le16 temperature;
	__le32 dirty_shutdowns;
	__le32 volatile_errors;
	__le32 pmem_errors;
} __packed;
111 
/* Canned Get Supported Logs response: exactly one entry, describing the CEL */
static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};
124 
125 #define PASS_TRY_LIMIT 3
126 
127 #define CXL_TEST_EVENT_CNT_MAX 15
128 
129 /* Set a number of events to return at a time for simulation.  */
130 #define CXL_TEST_EVENT_CNT 3
131 
/* One mock event log; handles are 1-based indices into events[] */
struct mock_event_log {
	u16 clear_idx;		/* next index the host may clear */
	u16 cur_idx;		/* next index to hand out via Get Event */
	u16 nr_events;		/* number of populated entries in events[] */
	u16 nr_overflow;	/* events dropped because the log was full */
	u16 overflow_reset;	/* overflow count to restore on log reset */
	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
};
140 
/* All mock logs for one device plus the aggregate event-status bits */
struct mock_event_store {
	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
	u32 ev_status;		/* CXLDEV_EVENT_STATUS_* bits with pending events */
};
145 
/* Per-device driver data for the mock CXL memory device */
struct cxl_mockmem_data {
	void *lsa;		/* backing store for the Label Storage Area */
	void *fw;		/* backing store for firmware slots */
	int fw_slot;		/* currently active firmware slot */
	int fw_staged;		/* slot staged by Transfer FW */
	size_t fw_size;
	u32 security_state;	/* CXL_PMEM_SEC_STATE_* bits */
	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
	int user_limit;		/* failed user-passphrase attempts */
	int master_limit;	/* failed master-passphrase attempts */
	struct mock_event_store mes;
	struct cxl_memdev_state *mds;
	u8 event_buf[SZ_4K];
	u64 timestamp;		/* last value written by Set Timestamp */
	unsigned long sanitize_timeout;	/* simulated sanitize latency, ms */
};
163 
164 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
165 {
166 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
167 
168 	if (log_type >= CXL_EVENT_TYPE_MAX)
169 		return NULL;
170 	return &mdata->mes.mock_logs[log_type];
171 }
172 
173 static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
174 {
175 	return log->events[log->cur_idx];
176 }
177 
178 static void event_reset_log(struct mock_event_log *log)
179 {
180 	log->cur_idx = 0;
181 	log->clear_idx = 0;
182 	log->nr_overflow = log->overflow_reset;
183 }
184 
185 /* Handle can never be 0 use 1 based indexing for handle */
186 static u16 event_get_clear_handle(struct mock_event_log *log)
187 {
188 	return log->clear_idx + 1;
189 }
190 
191 /* Handle can never be 0 use 1 based indexing for handle */
192 static __le16 event_get_cur_event_handle(struct mock_event_log *log)
193 {
194 	u16 cur_handle = log->cur_idx + 1;
195 
196 	return cpu_to_le16(cur_handle);
197 }
198 
199 static bool event_log_empty(struct mock_event_log *log)
200 {
201 	return log->cur_idx == log->nr_events;
202 }
203 
204 static void mes_add_event(struct mock_event_store *mes,
205 			  enum cxl_event_log_type log_type,
206 			  struct cxl_event_record_raw *event)
207 {
208 	struct mock_event_log *log;
209 
210 	if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
211 		return;
212 
213 	log = &mes->mock_logs[log_type];
214 
215 	if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
216 		log->nr_overflow++;
217 		log->overflow_reset = log->nr_overflow;
218 		return;
219 	}
220 
221 	log->events[log->nr_events] = event;
222 	log->nr_events++;
223 }
224 
225 static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
226 {
227 	struct cxl_get_event_payload *pl;
228 	struct mock_event_log *log;
229 	u16 nr_overflow;
230 	u8 log_type;
231 	int i;
232 
233 	if (cmd->size_in != sizeof(log_type))
234 		return -EINVAL;
235 
236 	if (cmd->size_out < struct_size(pl, records, CXL_TEST_EVENT_CNT))
237 		return -EINVAL;
238 
239 	log_type = *((u8 *)cmd->payload_in);
240 	if (log_type >= CXL_EVENT_TYPE_MAX)
241 		return -EINVAL;
242 
243 	memset(cmd->payload_out, 0, cmd->size_out);
244 
245 	log = event_find_log(dev, log_type);
246 	if (!log || event_log_empty(log))
247 		return 0;
248 
249 	pl = cmd->payload_out;
250 
251 	for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
252 		memcpy(&pl->records[i], event_get_current(log),
253 		       sizeof(pl->records[i]));
254 		pl->records[i].event.generic.hdr.handle =
255 				event_get_cur_event_handle(log);
256 		log->cur_idx++;
257 	}
258 
259 	pl->record_count = cpu_to_le16(i);
260 	if (!event_log_empty(log))
261 		pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
262 
263 	if (log->nr_overflow) {
264 		u64 ns;
265 
266 		pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
267 		pl->overflow_err_count = cpu_to_le16(nr_overflow);
268 		ns = ktime_get_real_ns();
269 		ns -= 5000000000; /* 5s ago */
270 		pl->first_overflow_timestamp = cpu_to_le64(ns);
271 		ns = ktime_get_real_ns();
272 		ns -= 1000000000; /* 1s ago */
273 		pl->last_overflow_timestamp = cpu_to_le64(ns);
274 	}
275 
276 	return 0;
277 }
278 
279 static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
280 {
281 	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
282 	struct mock_event_log *log;
283 	u8 log_type = pl->event_log;
284 	u16 handle;
285 	int nr;
286 
287 	if (log_type >= CXL_EVENT_TYPE_MAX)
288 		return -EINVAL;
289 
290 	log = event_find_log(dev, log_type);
291 	if (!log)
292 		return 0; /* No mock data in this log */
293 
294 	/*
295 	 * This check is technically not invalid per the specification AFAICS.
296 	 * (The host could 'guess' handles and clear them in order).
297 	 * However, this is not good behavior for the host so test it.
298 	 */
299 	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
300 		dev_err(dev,
301 			"Attempting to clear more events than returned!\n");
302 		return -EINVAL;
303 	}
304 
305 	/* Check handle order prior to clearing events */
306 	for (nr = 0, handle = event_get_clear_handle(log);
307 	     nr < pl->nr_recs;
308 	     nr++, handle++) {
309 		if (handle != le16_to_cpu(pl->handles[nr])) {
310 			dev_err(dev, "Clearing events out of order\n");
311 			return -EINVAL;
312 		}
313 	}
314 
315 	if (log->nr_overflow)
316 		log->nr_overflow = 0;
317 
318 	/* Clear events */
319 	log->clear_idx += pl->nr_recs;
320 	return 0;
321 }
322 
323 static void cxl_mock_event_trigger(struct device *dev)
324 {
325 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
326 	struct mock_event_store *mes = &mdata->mes;
327 	int i;
328 
329 	for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
330 		struct mock_event_log *log;
331 
332 		log = event_find_log(dev, i);
333 		if (log)
334 			event_reset_log(log);
335 	}
336 
337 	cxl_mem_get_event_records(mdata->mds, mes->ev_status);
338 }
339 
/* Fixture: generic record flagged "maintenance needed" (arbitrary test UUID) */
struct cxl_event_record_raw maint_needed = {
	.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xa5b6),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
353 
/* Fixture: generic record flagged "hardware replacement needed" */
struct cxl_event_record_raw hardware_replace = {
	.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xb6a5),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
367 
/* UUID + General Media event record, packed as it appears on the wire */
struct cxl_test_gen_media {
	uuid_t id;
	struct cxl_event_gen_media rec;
} __packed;
372 
/* Fixture: General Media event (validity_flags filled in at init time) */
struct cxl_test_gen_media gen_media = {
	.id = CXL_EVENT_GEN_MEDIA_UUID,
	.rec = {
		.hdr = {
			.length = sizeof(struct cxl_test_gen_media),
			.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0),
		},
		.phys_addr = cpu_to_le64(0x2000),
		.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
		.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
		.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
		/* .validity_flags = <set below> */
		.channel = 1,
		.rank = 30
	},
};
391 
/* UUID + DRAM event record, packed as it appears on the wire */
struct cxl_test_dram {
	uuid_t id;
	struct cxl_event_dram rec;
} __packed;
396 
/* Fixture: DRAM event (validity_flags filled in at init time) */
struct cxl_test_dram dram = {
	.id = CXL_EVENT_DRAM_UUID,
	.rec = {
		.hdr = {
			.length = sizeof(struct cxl_test_dram),
			.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0),
		},
		.phys_addr = cpu_to_le64(0x8000),
		.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
		.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
		.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
		/* .validity_flags = <set below> */
		.channel = 1,
		.bank_group = 5,
		.bank = 2,
		.column = {0xDE, 0xAD},
	},
};
417 
/* UUID + Memory Module event record, packed as it appears on the wire */
struct cxl_test_mem_module {
	uuid_t id;
	struct cxl_event_mem_module rec;
} __packed;
422 
/* Fixture: Memory Module temperature-change event with degraded health info */
struct cxl_test_mem_module mem_module = {
	.id = CXL_EVENT_MEM_MODULE_UUID,
	.rec = {
		.hdr = {
			.length = sizeof(struct cxl_test_mem_module),
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0),
		},
		.event_type = CXL_MMER_TEMP_CHANGE,
		.info = {
			.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
			.media_status = CXL_DHI_MS_ALL_DATA_LOST,
			.add_status = (CXL_DHI_AS_CRITICAL << 2) |
				      (CXL_DHI_AS_WARNING << 4) |
				      (CXL_DHI_AS_WARNING << 5),
			.device_temp = { 0xDE, 0xAD},
			.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
		}
	},
};
445 
446 static int mock_set_timestamp(struct cxl_dev_state *cxlds,
447 			      struct cxl_mbox_cmd *cmd)
448 {
449 	struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
450 	struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
451 
452 	if (cmd->size_in != sizeof(*ts))
453 		return -EINVAL;
454 
455 	if (cmd->size_out != 0)
456 		return -EINVAL;
457 
458 	mdata->timestamp = le64_to_cpu(ts->timestamp);
459 	return 0;
460 }
461 
/*
 * Populate the mock event logs. The INFO log gets three records, the FAIL
 * log is deliberately over-filled past CXL_TEST_EVENT_CNT_MAX to exercise
 * the overflow path, and the FATAL log gets two records. The exact sequence
 * here is part of the test contract — callers assert on it.
 */
static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
			   &gen_media.rec.validity_flags);

	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
			   &dram.rec.validity_flags);

	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&mem_module);
	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;

	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&mem_module);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	/* Overflow this log */
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;

	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
		      (struct cxl_event_record_raw *)&dram);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
}
507 
508 static int mock_gsl(struct cxl_mbox_cmd *cmd)
509 {
510 	if (cmd->size_out < sizeof(mock_gsl_payload))
511 		return -EINVAL;
512 
513 	memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
514 	cmd->size_out = sizeof(mock_gsl_payload);
515 
516 	return 0;
517 }
518 
519 static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
520 {
521 	struct cxl_mbox_get_log *gl = cmd->payload_in;
522 	u32 offset = le32_to_cpu(gl->offset);
523 	u32 length = le32_to_cpu(gl->length);
524 	uuid_t uuid = DEFINE_CXL_CEL_UUID;
525 	void *data = &mock_cel;
526 
527 	if (cmd->size_in < sizeof(*gl))
528 		return -EINVAL;
529 	if (length > mds->payload_size)
530 		return -EINVAL;
531 	if (offset + length > sizeof(mock_cel))
532 		return -EINVAL;
533 	if (!uuid_equal(&gl->uuid, &uuid))
534 		return -EINVAL;
535 	if (length > cmd->size_out)
536 		return -EINVAL;
537 
538 	memcpy(cmd->payload_out, data + offset, length);
539 
540 	return 0;
541 }
542 
543 static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
544 {
545 	struct cxl_mbox_identify id = {
546 		.fw_revision = { "mock fw v1 " },
547 		.total_capacity =
548 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
549 		.volatile_capacity =
550 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
551 	};
552 
553 	if (cmd->size_out < sizeof(id))
554 		return -EINVAL;
555 
556 	memcpy(cmd->payload_out, &id, sizeof(id));
557 
558 	return 0;
559 }
560 
561 static int mock_id(struct cxl_mbox_cmd *cmd)
562 {
563 	struct cxl_mbox_identify id = {
564 		.fw_revision = { "mock fw v1 " },
565 		.lsa_size = cpu_to_le32(LSA_SIZE),
566 		.partition_align =
567 			cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
568 		.total_capacity =
569 			cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
570 		.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
571 	};
572 
573 	put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
574 
575 	if (cmd->size_out < sizeof(id))
576 		return -EINVAL;
577 
578 	memcpy(cmd->payload_out, &id, sizeof(id));
579 
580 	return 0;
581 }
582 
583 static int mock_partition_info(struct cxl_mbox_cmd *cmd)
584 {
585 	struct cxl_mbox_get_partition_info pi = {
586 		.active_volatile_cap =
587 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
588 		.active_persistent_cap =
589 			cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
590 	};
591 
592 	if (cmd->size_out < sizeof(pi))
593 		return -EINVAL;
594 
595 	memcpy(cmd->payload_out, &pi, sizeof(pi));
596 
597 	return 0;
598 }
599 
/*
 * Delayed-work handler standing in for sanitize completion: notify any
 * sysfs waiter and clear the active flag. The mbox_mutex serializes
 * against mock_sanitize() scheduling/observing the same state.
 */
void cxl_mockmem_sanitize_work(struct work_struct *work)
{
	struct cxl_memdev_state *mds =
		container_of(work, typeof(*mds), security.poll_dwork.work);

	mutex_lock(&mds->mbox_mutex);
	if (mds->security.sanitize_node)
		sysfs_notify_dirent(mds->security.sanitize_node);
	mds->security.sanitize_active = false;
	mutex_unlock(&mds->mbox_mutex);

	dev_dbg(mds->cxlds.dev, "sanitize complete\n");
}
613 
/*
 * Mock the Sanitize command: refuse while a passphrase is set or the device
 * is locked, otherwise schedule cxl_mockmem_sanitize_work() to fire after
 * the configured sanitize_timeout (ms). Returns -EBUSY if a sanitize is
 * already pending (delayed work already queued).
 */
static int mock_sanitize(struct cxl_mockmem_data *mdata,
			 struct cxl_mbox_cmd *cmd)
{
	struct cxl_memdev_state *mds = mdata->mds;
	int rc = 0;

	/* Sanitize carries no input or output payload */
	if (cmd->size_in != 0)
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}
	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	/* mbox_mutex guards sanitize_active against the completion worker */
	mutex_lock(&mds->mbox_mutex);
	if (schedule_delayed_work(&mds->security.poll_dwork,
				  msecs_to_jiffies(mdata->sanitize_timeout))) {
		mds->security.sanitize_active = true;
		dev_dbg(mds->cxlds.dev, "sanitize issued\n");
	} else
		rc = -EBUSY;
	mutex_unlock(&mds->mbox_mutex);

	return rc;
}
646 
647 static int mock_secure_erase(struct cxl_mockmem_data *mdata,
648 			     struct cxl_mbox_cmd *cmd)
649 {
650 	if (cmd->size_in != 0)
651 		return -EINVAL;
652 
653 	if (cmd->size_out != 0)
654 		return -EINVAL;
655 
656 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
657 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
658 		return -ENXIO;
659 	}
660 
661 	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
662 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
663 		return -ENXIO;
664 	}
665 
666 	return 0;
667 }
668 
669 static int mock_get_security_state(struct cxl_mockmem_data *mdata,
670 				   struct cxl_mbox_cmd *cmd)
671 {
672 	if (cmd->size_in)
673 		return -EINVAL;
674 
675 	if (cmd->size_out != sizeof(u32))
676 		return -EINVAL;
677 
678 	memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
679 
680 	return 0;
681 }
682 
683 static void master_plimit_check(struct cxl_mockmem_data *mdata)
684 {
685 	if (mdata->master_limit == PASS_TRY_LIMIT)
686 		return;
687 	mdata->master_limit++;
688 	if (mdata->master_limit == PASS_TRY_LIMIT)
689 		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
690 }
691 
692 static void user_plimit_check(struct cxl_mockmem_data *mdata)
693 {
694 	if (mdata->user_limit == PASS_TRY_LIMIT)
695 		return;
696 	mdata->user_limit++;
697 	if (mdata->user_limit == PASS_TRY_LIMIT)
698 		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
699 }
700 
/*
 * Mock the Set Passphrase command: verify the old passphrase, store the
 * new one, and update the security-state bits. Failed attempts count
 * toward the per-passphrase retry limit via *_plimit_check().
 */
static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
			       struct cxl_mbox_cmd *cmd)
{
	struct cxl_set_pass *set_pass;

	if (cmd->size_in != sizeof(*set_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	/* No security changes once the state is frozen */
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	set_pass = cmd->payload_in;
	switch (set_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/*
		 * CXL spec rev3.0 8.2.9.8.6.2, The master passphrase shall only be set in
		 * the security disabled state when the user passphrase is not set.
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
	}
	return -EINVAL;
}
760 
761 static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
762 				   struct cxl_mbox_cmd *cmd)
763 {
764 	struct cxl_disable_pass *dis_pass;
765 
766 	if (cmd->size_in != sizeof(*dis_pass))
767 		return -EINVAL;
768 
769 	if (cmd->size_out != 0)
770 		return -EINVAL;
771 
772 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
773 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
774 		return -ENXIO;
775 	}
776 
777 	dis_pass = cmd->payload_in;
778 	switch (dis_pass->type) {
779 	case CXL_PMEM_SEC_PASS_MASTER:
780 		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
781 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
782 			return -ENXIO;
783 		}
784 
785 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
786 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
787 			return -ENXIO;
788 		}
789 
790 		if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
791 			master_plimit_check(mdata);
792 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
793 			return -ENXIO;
794 		}
795 
796 		mdata->master_limit = 0;
797 		memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
798 		mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
799 		return 0;
800 
801 	case CXL_PMEM_SEC_PASS_USER:
802 		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
803 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
804 			return -ENXIO;
805 		}
806 
807 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
808 			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
809 			return -ENXIO;
810 		}
811 
812 		if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
813 			user_plimit_check(mdata);
814 			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
815 			return -ENXIO;
816 		}
817 
818 		mdata->user_limit = 0;
819 		memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
820 		mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
821 					   CXL_PMEM_SEC_STATE_LOCKED);
822 		return 0;
823 
824 	default:
825 		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
826 		return -EINVAL;
827 	}
828 
829 	return 0;
830 }
831 
832 static int mock_freeze_security(struct cxl_mockmem_data *mdata,
833 				struct cxl_mbox_cmd *cmd)
834 {
835 	if (cmd->size_in != 0)
836 		return -EINVAL;
837 
838 	if (cmd->size_out != 0)
839 		return -EINVAL;
840 
841 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
842 		return 0;
843 
844 	mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
845 	return 0;
846 }
847 
848 static int mock_unlock_security(struct cxl_mockmem_data *mdata,
849 				struct cxl_mbox_cmd *cmd)
850 {
851 	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
852 		return -EINVAL;
853 
854 	if (cmd->size_out != 0)
855 		return -EINVAL;
856 
857 	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
858 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
859 		return -ENXIO;
860 	}
861 
862 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
863 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
864 		return -ENXIO;
865 	}
866 
867 	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
868 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
869 		return -ENXIO;
870 	}
871 
872 	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
873 		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
874 		return -ENXIO;
875 	}
876 
877 	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
878 		if (++mdata->user_limit == PASS_TRY_LIMIT)
879 			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
880 		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
881 		return -ENXIO;
882 	}
883 
884 	mdata->user_limit = 0;
885 	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
886 	return 0;
887 }
888 
/*
 * Mock the Passphrase Secure Erase command: validate the supplied master or
 * user passphrase (honoring freeze and retry-limit state), then emulate the
 * erase by clearing the user passphrase and lock state. Where the CXL spec
 * is silent, the fallbacks documented inline apply.
 */
static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
					struct cxl_mbox_cmd *cmd)
{
	struct cxl_pass_erase *erase;

	if (cmd->size_in != sizeof(*erase))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	erase = cmd->payload_in;
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_USER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	switch (erase->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a master passphrase is passed in while the master
		 * passphrase is not set and user passphrase is not set. The
		 * code will take the assumption that it will behave the same
		 * as a CXL secure erase command without passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
			if (memcmp(mdata->master_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				master_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->master_limit = 0;
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
		} else {
			/*
			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
			 * When master passphrase is disabled, the device shall
			 * return Invalid Input for the Passphrase Secure Erase
			 * command with master passphrase.
			 */
			return -EINVAL;
		}
		/* Scramble encryption keys so that data is effectively erased */
		break;
	case CXL_PMEM_SEC_PASS_USER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a user passphrase is passed in while the user
		 * passphrase is not set. The code will take the assumption that
		 * it will behave the same as a CXL secure erase command without
		 * passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			if (memcmp(mdata->user_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				user_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		}

		/*
		 * CXL rev3 Table 8-118
		 * If user passphrase is not set or supported by device, current
		 * passphrase value is ignored. Will make the assumption that
		 * the operation will proceed as secure erase w/o passphrase
		 * since spec is not explicit.
		 */

		/* Scramble encryption keys so that data is effectively erased */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
986 
987 static int mock_get_lsa(struct cxl_mockmem_data *mdata,
988 			struct cxl_mbox_cmd *cmd)
989 {
990 	struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
991 	void *lsa = mdata->lsa;
992 	u32 offset, length;
993 
994 	if (sizeof(*get_lsa) > cmd->size_in)
995 		return -EINVAL;
996 	offset = le32_to_cpu(get_lsa->offset);
997 	length = le32_to_cpu(get_lsa->length);
998 	if (offset + length > LSA_SIZE)
999 		return -EINVAL;
1000 	if (length > cmd->size_out)
1001 		return -EINVAL;
1002 
1003 	memcpy(cmd->payload_out, lsa + offset, length);
1004 	return 0;
1005 }
1006 
1007 static int mock_set_lsa(struct cxl_mockmem_data *mdata,
1008 			struct cxl_mbox_cmd *cmd)
1009 {
1010 	struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
1011 	void *lsa = mdata->lsa;
1012 	u32 offset, length;
1013 
1014 	if (sizeof(*set_lsa) > cmd->size_in)
1015 		return -EINVAL;
1016 	offset = le32_to_cpu(set_lsa->offset);
1017 	length = cmd->size_in - sizeof(*set_lsa);
1018 	if (offset + length > LSA_SIZE)
1019 		return -EINVAL;
1020 
1021 	memcpy(lsa + offset, &set_lsa->data[0], length);
1022 	return 0;
1023 }
1024 
1025 static int mock_health_info(struct cxl_mbox_cmd *cmd)
1026 {
1027 	struct cxl_mbox_health_info health_info = {
1028 		/* set flags for maint needed, perf degraded, hw replacement */
1029 		.health_status = 0x7,
1030 		/* set media status to "All Data Lost" */
1031 		.media_status = 0x3,
1032 		/*
1033 		 * set ext_status flags for:
1034 		 *  ext_life_used: normal,
1035 		 *  ext_temperature: critical,
1036 		 *  ext_corrected_volatile: warning,
1037 		 *  ext_corrected_persistent: normal,
1038 		 */
1039 		.ext_status = 0x18,
1040 		.life_used = 15,
1041 		.temperature = cpu_to_le16(25),
1042 		.dirty_shutdowns = cpu_to_le32(10),
1043 		.volatile_errors = cpu_to_le32(20),
1044 		.pmem_errors = cpu_to_le32(30),
1045 	};
1046 
1047 	if (cmd->size_out < sizeof(health_info))
1048 		return -EINVAL;
1049 
1050 	memcpy(cmd->payload_out, &health_info, sizeof(health_info));
1051 	return 0;
1052 }

/* Global injected-poison table; an entry is free when .cxlds is NULL */
static struct mock_poison {
	struct cxl_dev_state *cxlds;	/* owning device, NULL if slot unused */
	u64 dpa;			/* poisoned device physical address */
} mock_poison_list[MOCK_INJECT_TEST_MAX];
1058 
1059 static struct cxl_mbox_poison_out *
1060 cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
1061 {
1062 	struct cxl_mbox_poison_out *po;
1063 	int nr_records = 0;
1064 	u64 dpa;
1065 
1066 	po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
1067 	if (!po)
1068 		return NULL;
1069 
1070 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1071 		if (mock_poison_list[i].cxlds != cxlds)
1072 			continue;
1073 		if (mock_poison_list[i].dpa < offset ||
1074 		    mock_poison_list[i].dpa > offset + length - 1)
1075 			continue;
1076 
1077 		dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
1078 		po->record[nr_records].address = cpu_to_le64(dpa);
1079 		po->record[nr_records].length = cpu_to_le32(1);
1080 		nr_records++;
1081 		if (nr_records == poison_inject_dev_max)
1082 			break;
1083 	}
1084 
1085 	/* Always return count, even when zero */
1086 	po->count = cpu_to_le16(nr_records);
1087 
1088 	return po;
1089 }
1090 
1091 static int mock_get_poison(struct cxl_dev_state *cxlds,
1092 			   struct cxl_mbox_cmd *cmd)
1093 {
1094 	struct cxl_mbox_poison_in *pi = cmd->payload_in;
1095 	struct cxl_mbox_poison_out *po;
1096 	u64 offset = le64_to_cpu(pi->offset);
1097 	u64 length = le64_to_cpu(pi->length);
1098 	int nr_records;
1099 
1100 	po = cxl_get_injected_po(cxlds, offset, length);
1101 	if (!po)
1102 		return -ENOMEM;
1103 	nr_records = le16_to_cpu(po->count);
1104 	memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1105 	cmd->size_out = struct_size(po, record, nr_records);
1106 	kfree(po);
1107 
1108 	return 0;
1109 }
1110 
1111 static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1112 {
1113 	int count = 0;
1114 
1115 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1116 		if (mock_poison_list[i].cxlds == cxlds)
1117 			count++;
1118 	}
1119 	return (count >= poison_inject_dev_max);
1120 }
1121 
1122 static bool mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
1123 {
1124 	if (mock_poison_dev_max_injected(cxlds)) {
1125 		dev_dbg(cxlds->dev,
1126 			"Device poison injection limit has been reached: %d\n",
1127 			MOCK_INJECT_DEV_MAX);
1128 		return false;
1129 	}
1130 
1131 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1132 		if (!mock_poison_list[i].cxlds) {
1133 			mock_poison_list[i].cxlds = cxlds;
1134 			mock_poison_list[i].dpa = dpa;
1135 			return true;
1136 		}
1137 	}
1138 	dev_dbg(cxlds->dev,
1139 		"Mock test poison injection limit has been reached: %d\n",
1140 		MOCK_INJECT_TEST_MAX);
1141 
1142 	return false;
1143 }
1144 
1145 static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
1146 {
1147 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1148 		if (mock_poison_list[i].cxlds == cxlds &&
1149 		    mock_poison_list[i].dpa == dpa)
1150 			return true;
1151 	}
1152 	return false;
1153 }
1154 
1155 static int mock_inject_poison(struct cxl_dev_state *cxlds,
1156 			      struct cxl_mbox_cmd *cmd)
1157 {
1158 	struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1159 	u64 dpa = le64_to_cpu(pi->address);
1160 
1161 	if (mock_poison_found(cxlds, dpa)) {
1162 		/* Not an error to inject poison if already poisoned */
1163 		dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1164 		return 0;
1165 	}
1166 	if (!mock_poison_add(cxlds, dpa))
1167 		return -ENXIO;
1168 
1169 	return 0;
1170 }
1171 
1172 static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
1173 {
1174 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1175 		if (mock_poison_list[i].cxlds == cxlds &&
1176 		    mock_poison_list[i].dpa == dpa) {
1177 			mock_poison_list[i].cxlds = NULL;
1178 			return true;
1179 		}
1180 	}
1181 	return false;
1182 }
1183 
1184 static int mock_clear_poison(struct cxl_dev_state *cxlds,
1185 			     struct cxl_mbox_cmd *cmd)
1186 {
1187 	struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1188 	u64 dpa = le64_to_cpu(pi->address);
1189 
1190 	/*
1191 	 * A real CXL device will write pi->write_data to the address
1192 	 * being cleared. In this mock, just delete this address from
1193 	 * the mock poison list.
1194 	 */
1195 	if (!mock_poison_del(cxlds, dpa))
1196 		dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1197 
1198 	return 0;
1199 }
1200 
1201 static bool mock_poison_list_empty(void)
1202 {
1203 	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1204 		if (mock_poison_list[i].cxlds)
1205 			return false;
1206 	}
1207 	return true;
1208 }
1209 
/* sysfs read of the per-device poison injection limit */
static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
{
	return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
}
1214 
1215 static ssize_t poison_inject_max_store(struct device_driver *drv,
1216 				       const char *buf, size_t len)
1217 {
1218 	int val;
1219 
1220 	if (kstrtoint(buf, 0, &val) < 0)
1221 		return -EINVAL;
1222 
1223 	if (!mock_poison_list_empty())
1224 		return -EBUSY;
1225 
1226 	if (val <= MOCK_INJECT_TEST_MAX)
1227 		poison_inject_dev_max = val;
1228 	else
1229 		return -EINVAL;
1230 
1231 	return len;
1232 }
1233 
1234 static DRIVER_ATTR_RW(poison_inject_max);
1235 
/* Driver-level (not per-device) sysfs attributes */
static struct attribute *cxl_mock_mem_core_attrs[] = {
	&driver_attr_poison_inject_max.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem_core);
1241 
1242 static int mock_fw_info(struct cxl_mockmem_data *mdata,
1243 			struct cxl_mbox_cmd *cmd)
1244 {
1245 	struct cxl_mbox_get_fw_info fw_info = {
1246 		.num_slots = FW_SLOTS,
1247 		.slot_info = (mdata->fw_slot & 0x7) |
1248 			     ((mdata->fw_staged & 0x7) << 3),
1249 		.activation_cap = 0,
1250 	};
1251 
1252 	strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
1253 	strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
1254 	strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
1255 	strcpy(fw_info.slot_4_revision, "");
1256 
1257 	if (cmd->size_out < sizeof(fw_info))
1258 		return -EINVAL;
1259 
1260 	memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
1261 	return 0;
1262 }
1263 
1264 static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
1265 			    struct cxl_mbox_cmd *cmd)
1266 {
1267 	struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
1268 	void *fw = mdata->fw;
1269 	size_t offset, length;
1270 
1271 	offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
1272 	length = cmd->size_in - sizeof(*transfer);
1273 	if (offset + length > FW_SIZE)
1274 		return -EINVAL;
1275 
1276 	switch (transfer->action) {
1277 	case CXL_FW_TRANSFER_ACTION_FULL:
1278 		if (offset != 0)
1279 			return -EINVAL;
1280 		fallthrough;
1281 	case CXL_FW_TRANSFER_ACTION_END:
1282 		if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
1283 			return -EINVAL;
1284 		mdata->fw_size = offset + length;
1285 		break;
1286 	case CXL_FW_TRANSFER_ACTION_INITIATE:
1287 	case CXL_FW_TRANSFER_ACTION_CONTINUE:
1288 		break;
1289 	case CXL_FW_TRANSFER_ACTION_ABORT:
1290 		return 0;
1291 	default:
1292 		return -EINVAL;
1293 	}
1294 
1295 	memcpy(fw + offset, transfer->data, length);
1296 	usleep_range(1500, 2000);
1297 	return 0;
1298 }
1299 
1300 static int mock_activate_fw(struct cxl_mockmem_data *mdata,
1301 			    struct cxl_mbox_cmd *cmd)
1302 {
1303 	struct cxl_mbox_activate_fw *activate = cmd->payload_in;
1304 
1305 	if (activate->slot == 0 || activate->slot > FW_SLOTS)
1306 		return -EINVAL;
1307 
1308 	switch (activate->action) {
1309 	case CXL_FW_ACTIVATE_ONLINE:
1310 		mdata->fw_slot = activate->slot;
1311 		mdata->fw_staged = 0;
1312 		return 0;
1313 	case CXL_FW_ACTIVATE_OFFLINE:
1314 		mdata->fw_staged = activate->slot;
1315 		return 0;
1316 	}
1317 
1318 	return -EINVAL;
1319 }
1320 
/*
 * Mock mailbox dispatcher installed as mds->mbox_send. Routes each
 * opcode to its mock handler; any opcode without a case below falls
 * through with the initial rc of -EIO (unsupported).
 */
static int cxl_mock_mbox_send(struct cxl_memdev_state *mds,
			      struct cxl_mbox_cmd *cmd)
{
	struct cxl_dev_state *cxlds = &mds->cxlds;
	struct device *dev = cxlds->dev;
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	int rc = -EIO;

	switch (cmd->opcode) {
	case CXL_MBOX_OP_SET_TIMESTAMP:
		rc = mock_set_timestamp(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
		rc = mock_gsl(cmd);
		break;
	case CXL_MBOX_OP_GET_LOG:
		rc = mock_get_log(mds, cmd);
		break;
	case CXL_MBOX_OP_IDENTIFY:
		/* RCD (restricted CXL device) identifies differently */
		if (cxlds->rcd)
			rc = mock_rcd_id(cmd);
		else
			rc = mock_id(cmd);
		break;
	case CXL_MBOX_OP_GET_LSA:
		rc = mock_get_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_PARTITION_INFO:
		rc = mock_partition_info(cmd);
		break;
	case CXL_MBOX_OP_GET_EVENT_RECORD:
		rc = mock_get_event(dev, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
		rc = mock_clear_event(dev, cmd);
		break;
	case CXL_MBOX_OP_SET_LSA:
		rc = mock_set_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_HEALTH_INFO:
		rc = mock_health_info(cmd);
		break;
	case CXL_MBOX_OP_SANITIZE:
		rc = mock_sanitize(mdata, cmd);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		rc = mock_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		rc = mock_get_security_state(mdata, cmd);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		rc = mock_set_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		rc = mock_disable_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		rc = mock_freeze_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_UNLOCK:
		rc = mock_unlock_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		rc = mock_passphrase_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_POISON:
		rc = mock_get_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		rc = mock_inject_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		rc = mock_clear_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_FW_INFO:
		rc = mock_fw_info(mdata, cmd);
		break;
	case CXL_MBOX_OP_TRANSFER_FW:
		rc = mock_transfer_fw(mdata, cmd);
		break;
	case CXL_MBOX_OP_ACTIVATE_FW:
		rc = mock_activate_fw(mdata, cmd);
		break;
	default:
		break;
	}

	/* Trace every command for test debugging */
	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
		cmd->size_in, cmd->size_out, rc);

	return rc;
}
1414 
/* devm action: free the vmalloc()'d label storage area on device teardown */
static void label_area_release(void *lsa)
{
	vfree(lsa);
}
1419 
/* devm action: free the vmalloc()'d firmware buffer on device teardown */
static void fw_buf_release(void *buf)
{
	vfree(buf);
}
1424 
1425 static bool is_rcd(struct platform_device *pdev)
1426 {
1427 	const struct platform_device_id *id = platform_get_device_id(pdev);
1428 
1429 	return !!id->driver_data;
1430 }
1431 
/* Any write to the event_trigger attribute fires the mock event machinery */
static ssize_t event_trigger_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	cxl_mock_event_trigger(dev);
	return count;
}
1439 static DEVICE_ATTR_WO(event_trigger);
1440 
1441 static int cxl_mock_mem_probe(struct platform_device *pdev)
1442 {
1443 	struct device *dev = &pdev->dev;
1444 	struct cxl_memdev *cxlmd;
1445 	struct cxl_memdev_state *mds;
1446 	struct cxl_dev_state *cxlds;
1447 	struct cxl_mockmem_data *mdata;
1448 	int rc;
1449 
1450 	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
1451 	if (!mdata)
1452 		return -ENOMEM;
1453 	dev_set_drvdata(dev, mdata);
1454 
1455 	mdata->lsa = vmalloc(LSA_SIZE);
1456 	if (!mdata->lsa)
1457 		return -ENOMEM;
1458 	mdata->fw = vmalloc(FW_SIZE);
1459 	if (!mdata->fw)
1460 		return -ENOMEM;
1461 	mdata->fw_slot = 2;
1462 
1463 	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
1464 	if (rc)
1465 		return rc;
1466 
1467 	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
1468 	if (rc)
1469 		return rc;
1470 
1471 	mds = cxl_memdev_state_create(dev);
1472 	if (IS_ERR(mds))
1473 		return PTR_ERR(mds);
1474 
1475 	mdata->mds = mds;
1476 	mds->mbox_send = cxl_mock_mbox_send;
1477 	mds->payload_size = SZ_4K;
1478 	mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
1479 	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);
1480 
1481 	cxlds = &mds->cxlds;
1482 	cxlds->serial = pdev->id;
1483 	if (is_rcd(pdev))
1484 		cxlds->rcd = true;
1485 
1486 	rc = cxl_enumerate_cmds(mds);
1487 	if (rc)
1488 		return rc;
1489 
1490 	rc = cxl_poison_state_init(mds);
1491 	if (rc)
1492 		return rc;
1493 
1494 	rc = cxl_set_timestamp(mds);
1495 	if (rc)
1496 		return rc;
1497 
1498 	cxlds->media_ready = true;
1499 	rc = cxl_dev_state_identify(mds);
1500 	if (rc)
1501 		return rc;
1502 
1503 	rc = cxl_mem_create_range_info(mds);
1504 	if (rc)
1505 		return rc;
1506 
1507 	cxl_mock_add_event_logs(&mdata->mes);
1508 
1509 	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
1510 	if (IS_ERR(cxlmd))
1511 		return PTR_ERR(cxlmd);
1512 
1513 	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
1514 	if (rc)
1515 		return rc;
1516 
1517 	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
1518 	if (rc)
1519 		return rc;
1520 
1521 	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
1522 
1523 	return 0;
1524 }
1525 
1526 static ssize_t security_lock_show(struct device *dev,
1527 				  struct device_attribute *attr, char *buf)
1528 {
1529 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1530 
1531 	return sysfs_emit(buf, "%u\n",
1532 			  !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1533 }
1534 
1535 static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
1536 				   const char *buf, size_t count)
1537 {
1538 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1539 	u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
1540 		   CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
1541 	int val;
1542 
1543 	if (kstrtoint(buf, 0, &val) < 0)
1544 		return -EINVAL;
1545 
1546 	if (val == 1) {
1547 		if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
1548 			return -ENXIO;
1549 		mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
1550 		mdata->security_state &= ~mask;
1551 	} else {
1552 		return -EINVAL;
1553 	}
1554 	return count;
1555 }
1556 
1557 static DEVICE_ATTR_RW(security_lock);
1558 
1559 static ssize_t fw_buf_checksum_show(struct device *dev,
1560 				    struct device_attribute *attr, char *buf)
1561 {
1562 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1563 	u8 hash[SHA256_DIGEST_SIZE];
1564 	unsigned char *hstr, *hptr;
1565 	struct sha256_state sctx;
1566 	ssize_t written = 0;
1567 	int i;
1568 
1569 	sha256_init(&sctx);
1570 	sha256_update(&sctx, mdata->fw, mdata->fw_size);
1571 	sha256_final(&sctx, hash);
1572 
1573 	hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL);
1574 	if (!hstr)
1575 		return -ENOMEM;
1576 
1577 	hptr = hstr;
1578 	for (i = 0; i < SHA256_DIGEST_SIZE; i++)
1579 		hptr += sprintf(hptr, "%02x", hash[i]);
1580 
1581 	written = sysfs_emit(buf, "%s\n", hstr);
1582 
1583 	kfree(hstr);
1584 	return written;
1585 }
1586 
1587 static DEVICE_ATTR_RO(fw_buf_checksum);
1588 
/* sysfs read of the simulated sanitize duration (seconds) */
static ssize_t sanitize_timeout_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout);
}
1596 
1597 static ssize_t sanitize_timeout_store(struct device *dev,
1598 				      struct device_attribute *attr,
1599 				      const char *buf, size_t count)
1600 {
1601 	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1602 	unsigned long val;
1603 	int rc;
1604 
1605 	rc = kstrtoul(buf, 0, &val);
1606 	if (rc)
1607 		return rc;
1608 
1609 	mdata->sanitize_timeout = val;
1610 
1611 	return count;
1612 }
1613 
1614 static DEVICE_ATTR_RW(sanitize_timeout);
1615 
/* Per-device sysfs attributes exposed by each mock memdev */
static struct attribute *cxl_mock_mem_attrs[] = {
	&dev_attr_security_lock.attr,
	&dev_attr_event_trigger.attr,
	&dev_attr_fw_buf_checksum.attr,
	&dev_attr_sanitize_timeout.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);
1624 
1625 static const struct platform_device_id cxl_mock_mem_ids[] = {
1626 	{ .name = "cxl_mem", 0 },
1627 	{ .name = "cxl_rcd", 1 },
1628 	{ },
1629 };
1630 MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1631 
/*
 * Platform driver registration: dev_groups attaches the per-device
 * attributes, groups attaches the driver-level poison_inject_max knob.
 */
static struct platform_driver cxl_mock_mem_driver = {
	.probe = cxl_mock_mem_probe,
	.id_table = cxl_mock_mem_ids,
	.driver = {
		.name = KBUILD_MODNAME,
		.dev_groups = cxl_mock_mem_groups,
		.groups = cxl_mock_mem_core_groups,
	},
};
1641 
1642 module_platform_driver(cxl_mock_mem_driver);
1643 MODULE_LICENSE("GPL v2");
1644 MODULE_IMPORT_NS(CXL);
1645