Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/tools/testing/cxl/test/mem.c
29270 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
// Copyright(c) 2021 Intel Corporation. All rights reserved.
3
4
#include <linux/platform_device.h>
5
#include <linux/mod_devicetable.h>
6
#include <linux/vmalloc.h>
7
#include <linux/module.h>
8
#include <linux/delay.h>
9
#include <linux/sizes.h>
10
#include <linux/bits.h>
11
#include <cxl/mailbox.h>
12
#include <linux/unaligned.h>
13
#include <crypto/sha2.h>
14
#include <cxlmem.h>
15
16
#include "trace.h"
17
18
#define LSA_SIZE SZ_128K
#define FW_SIZE SZ_64M
#define FW_SLOTS 3
#define DEV_SIZE SZ_2G
/* Build a single command-effect bit; argument parenthesized to be
 * expansion-safe (e.g. EFFECT(a + b)).
 */
#define EFFECT(x) (1U << (x))

#define MOCK_INJECT_DEV_MAX 8
#define MOCK_INJECT_TEST_MAX 128

/* Per-device poison injection capacity advertised by the mock device */
static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX;
28
29
/* Bit positions for the Command Effects Log effect field; used via EFFECT() */
enum cxl_command_effects {
	CONF_CHANGE_COLD_RESET = 0,
	CONF_CHANGE_IMMEDIATE,
	DATA_CHANGE_IMMEDIATE,
	POLICY_CHANGE_IMMEDIATE,
	LOG_CHANGE_IMMEDIATE,
	SECURITY_CHANGE_IMMEDIATE,
	BACKGROUND_OP,
	SECONDARY_MBOX_SUPPORTED,
};
39
40
#define CXL_CMD_EFFECT_NONE cpu_to_le16(0)
41
42
static struct cxl_cel_entry mock_cel[] = {
43
{
44
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS),
45
.effect = CXL_CMD_EFFECT_NONE,
46
},
47
{
48
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_FEATURES),
49
.effect = CXL_CMD_EFFECT_NONE,
50
},
51
{
52
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FEATURE),
53
.effect = CXL_CMD_EFFECT_NONE,
54
},
55
{
56
.opcode = cpu_to_le16(CXL_MBOX_OP_SET_FEATURE),
57
.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE)),
58
},
59
{
60
.opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY),
61
.effect = CXL_CMD_EFFECT_NONE,
62
},
63
{
64
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA),
65
.effect = CXL_CMD_EFFECT_NONE,
66
},
67
{
68
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO),
69
.effect = CXL_CMD_EFFECT_NONE,
70
},
71
{
72
.opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA),
73
.effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) |
74
EFFECT(DATA_CHANGE_IMMEDIATE)),
75
},
76
{
77
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO),
78
.effect = CXL_CMD_EFFECT_NONE,
79
},
80
{
81
.opcode = cpu_to_le16(CXL_MBOX_OP_SET_SHUTDOWN_STATE),
82
.effect = POLICY_CHANGE_IMMEDIATE,
83
},
84
{
85
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON),
86
.effect = CXL_CMD_EFFECT_NONE,
87
},
88
{
89
.opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON),
90
.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
91
},
92
{
93
.opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON),
94
.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)),
95
},
96
{
97
.opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO),
98
.effect = CXL_CMD_EFFECT_NONE,
99
},
100
{
101
.opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW),
102
.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
103
EFFECT(BACKGROUND_OP)),
104
},
105
{
106
.opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW),
107
.effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
108
EFFECT(CONF_CHANGE_IMMEDIATE)),
109
},
110
{
111
.opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE),
112
.effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) |
113
EFFECT(SECURITY_CHANGE_IMMEDIATE) |
114
EFFECT(BACKGROUND_OP)),
115
},
116
};
117
118
/* See CXL 2.0 Table 181 Get Health Info Output Payload */
struct cxl_mbox_health_info {
	u8 health_status;	/* device-level health flags */
	u8 media_status;	/* media state, e.g. "all data lost" */
	u8 ext_status;		/* packed per-category severity fields */
	u8 life_used;		/* percentage of rated life consumed */
	__le16 temperature;	/* device temperature, degrees C */
	__le32 dirty_shutdowns;
	__le32 volatile_errors;
	__le32 pmem_errors;
} __packed;
129
130
/*
 * Canned Get Supported Logs response: exactly one log (the CEL), whose
 * reported size matches the mock_cel[] table above.
 */
static struct {
	struct cxl_mbox_get_supported_logs gsl;
	struct cxl_gsl_entry entry;
} mock_gsl_payload = {
	.gsl = {
		.entries = cpu_to_le16(1),
	},
	.entry = {
		.uuid = DEFINE_CXL_CEL_UUID,
		.size = cpu_to_le32(sizeof(mock_cel)),
	},
};
142
143
/* Number of bad passphrase attempts before the limit-reached state is set */
#define PASS_TRY_LIMIT 3

/* Capacity of each mock event log; additions beyond this count as overflow */
#define CXL_TEST_EVENT_CNT_MAX 15

/* Set a number of events to return at a time for simulation. */
#define CXL_TEST_EVENT_RET_MAX 4
149
150
/* One simulated event log; indices are 0-based, handles are index + 1 */
struct mock_event_log {
	u16 clear_idx;		/* next record expected to be cleared */
	u16 cur_idx;		/* next record to hand out via Get Event */
	u16 nr_events;		/* number of valid entries in events[] */
	u16 nr_overflow;	/* records dropped because the log was full */
	u16 overflow_reset;	/* overflow count restored by event_reset_log() */
	struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX];
};

/* All logs for one mock device plus the aggregate event-status bits */
struct mock_event_store {
	struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
	u32 ev_status;
};

/* Payload for the mock vendor-specific test feature */
struct vendor_test_feat {
	__le32 data;
} __packed;
167
168
/* Per-device mock state, stored as the platform device's drvdata */
struct cxl_mockmem_data {
	void *lsa;		/* backing store for the Label Storage Area */
	void *fw;		/* backing store for firmware transfers */
	int fw_slot;		/* currently active firmware slot */
	int fw_staged;		/* slot staged by Transfer FW, -1 if none */
	size_t fw_size;
	u32 security_state;	/* CXL_PMEM_SEC_STATE_* bitmask */
	u8 user_pass[NVDIMM_PASSPHRASE_LEN];
	u8 master_pass[NVDIMM_PASSPHRASE_LEN];
	int user_limit;		/* failed user passphrase attempts */
	int master_limit;	/* failed master passphrase attempts */
	struct mock_event_store mes;
	struct cxl_memdev_state *mds;
	u8 event_buf[SZ_4K];
	u64 timestamp;
	unsigned long sanitize_timeout;	/* simulated sanitize duration, ms */
	struct vendor_test_feat test_feat;
	u8 shutdown_state;
};
187
188
static struct mock_event_log *event_find_log(struct device *dev, int log_type)
189
{
190
struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
191
192
if (log_type >= CXL_EVENT_TYPE_MAX)
193
return NULL;
194
return &mdata->mes.mock_logs[log_type];
195
}
196
197
static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log)
198
{
199
return log->events[log->cur_idx];
200
}
201
202
static void event_reset_log(struct mock_event_log *log)
203
{
204
log->cur_idx = 0;
205
log->clear_idx = 0;
206
log->nr_overflow = log->overflow_reset;
207
}
208
209
/* Handle can never be 0 use 1 based indexing for handle */
210
static u16 event_get_clear_handle(struct mock_event_log *log)
211
{
212
return log->clear_idx + 1;
213
}
214
215
/* Handle can never be 0 use 1 based indexing for handle */
216
static __le16 event_get_cur_event_handle(struct mock_event_log *log)
217
{
218
u16 cur_handle = log->cur_idx + 1;
219
220
return cpu_to_le16(cur_handle);
221
}
222
223
static bool event_log_empty(struct mock_event_log *log)
224
{
225
return log->cur_idx == log->nr_events;
226
}
227
228
static void mes_add_event(struct mock_event_store *mes,
229
enum cxl_event_log_type log_type,
230
struct cxl_event_record_raw *event)
231
{
232
struct mock_event_log *log;
233
234
if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX))
235
return;
236
237
log = &mes->mock_logs[log_type];
238
239
if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) {
240
log->nr_overflow++;
241
log->overflow_reset = log->nr_overflow;
242
return;
243
}
244
245
log->events[log->nr_events] = event;
246
log->nr_events++;
247
}
248
249
/*
 * Vary the number of events returned to simulate events occurring while the
 * logs are being read.
 */
static int ret_limit;
254
255
static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
256
{
257
struct cxl_get_event_payload *pl;
258
struct mock_event_log *log;
259
u16 nr_overflow;
260
u8 log_type;
261
int i;
262
263
if (cmd->size_in != sizeof(log_type))
264
return -EINVAL;
265
266
ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX;
267
if (!ret_limit)
268
ret_limit = 1;
269
270
if (cmd->size_out < struct_size(pl, records, ret_limit))
271
return -EINVAL;
272
273
log_type = *((u8 *)cmd->payload_in);
274
if (log_type >= CXL_EVENT_TYPE_MAX)
275
return -EINVAL;
276
277
memset(cmd->payload_out, 0, struct_size(pl, records, 0));
278
279
log = event_find_log(dev, log_type);
280
if (!log || event_log_empty(log))
281
return 0;
282
283
pl = cmd->payload_out;
284
285
for (i = 0; i < ret_limit && !event_log_empty(log); i++) {
286
memcpy(&pl->records[i], event_get_current(log),
287
sizeof(pl->records[i]));
288
pl->records[i].event.generic.hdr.handle =
289
event_get_cur_event_handle(log);
290
log->cur_idx++;
291
}
292
293
cmd->size_out = struct_size(pl, records, i);
294
pl->record_count = cpu_to_le16(i);
295
if (!event_log_empty(log))
296
pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS;
297
298
if (log->nr_overflow) {
299
u64 ns;
300
301
pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW;
302
pl->overflow_err_count = cpu_to_le16(nr_overflow);
303
ns = ktime_get_real_ns();
304
ns -= 5000000000; /* 5s ago */
305
pl->first_overflow_timestamp = cpu_to_le64(ns);
306
ns = ktime_get_real_ns();
307
ns -= 1000000000; /* 1s ago */
308
pl->last_overflow_timestamp = cpu_to_le64(ns);
309
}
310
311
return 0;
312
}
313
314
/*
 * Mock Clear Event Records: validates that the host clears only records it
 * has already been given, and clears them strictly in handle order.
 */
static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd)
{
	struct cxl_mbox_clear_event_payload *pl = cmd->payload_in;
	struct mock_event_log *log;
	u8 log_type = pl->event_log;
	u16 handle;
	int nr;

	if (log_type >= CXL_EVENT_TYPE_MAX)
		return -EINVAL;

	log = event_find_log(dev, log_type);
	if (!log)
		return 0; /* No mock data in this log */

	/*
	 * This check is technically not invalid per the specification AFAICS.
	 * (The host could 'guess' handles and clear them in order).
	 * However, this is not good behavior for the host so test it.
	 */
	if (log->clear_idx + pl->nr_recs > log->cur_idx) {
		dev_err(dev,
			"Attempting to clear more events than returned!\n");
		return -EINVAL;
	}

	/* Check handle order prior to clearing events */
	for (nr = 0, handle = event_get_clear_handle(log);
	     nr < pl->nr_recs;
	     nr++, handle++) {
		if (handle != le16_to_cpu(pl->handles[nr])) {
			dev_err(dev, "Clearing events out of order\n");
			return -EINVAL;
		}
	}

	/* Any successful clear resets the overflow accounting */
	if (log->nr_overflow)
		log->nr_overflow = 0;

	/* Clear events */
	log->clear_idx += pl->nr_recs;
	return 0;
}
357
358
static void cxl_mock_event_trigger(struct device *dev)
359
{
360
struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
361
struct mock_event_store *mes = &mdata->mes;
362
int i;
363
364
for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) {
365
struct mock_event_log *log;
366
367
log = event_find_log(dev, i);
368
if (log)
369
event_reset_log(log);
370
}
371
372
cxl_mem_get_event_records(mdata->mds, mes->ev_status);
373
}
374
375
/* Vendor-specific (raw) record flagged "maintenance needed" */
struct cxl_event_record_raw maint_needed = {
	.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xa5b6),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
388
389
/* Vendor-specific (raw) record flagged "hardware replacement needed" */
struct cxl_event_record_raw hardware_replace = {
	.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
			0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
	.event.generic = {
		.hdr = {
			.length = sizeof(struct cxl_event_record_raw),
			.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0xb6a5),
		},
		.data = { 0xDE, 0xAD, 0xBE, 0xEF },
	},
};
402
403
/* UUID-prefixed General Media event, as it appears on the wire */
struct cxl_test_gen_media {
	uuid_t id;
	struct cxl_event_gen_media rec;
} __packed;

/* Sample General Media event: permanent, uncorrectable host-write error */
struct cxl_test_gen_media gen_media = {
	.id = CXL_EVENT_GEN_MEDIA_UUID,
	.rec = {
		.media_hdr = {
			.hdr = {
				.length = sizeof(struct cxl_test_gen_media),
				.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
				/* .handle = Set dynamically */
				.related_handle = cpu_to_le16(0),
			},
			.phys_addr = cpu_to_le64(0x2000),
			.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
			.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
			.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
			/* .validity_flags = <set below> */
			.channel = 1,
			.rank = 30,
		},
		.component_id = { 0x3, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
		.cme_threshold_ev_flags = 3,
		.cme_count = { 33, 0, 0 },
		.sub_type = 0x2,
	},
};
432
433
/* UUID-prefixed DRAM event, as it appears on the wire */
struct cxl_test_dram {
	uuid_t id;
	struct cxl_event_dram rec;
} __packed;

/* Sample DRAM event: threshold crossing found by an internal media scrub */
struct cxl_test_dram dram = {
	.id = CXL_EVENT_DRAM_UUID,
	.rec = {
		.media_hdr = {
			.hdr = {
				.length = sizeof(struct cxl_test_dram),
				.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
				/* .handle = Set dynamically */
				.related_handle = cpu_to_le16(0),
			},
			.phys_addr = cpu_to_le64(0x8000),
			.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
			.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
			.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
			/* .validity_flags = <set below> */
			.channel = 1,
		},
		.bank_group = 5,
		.bank = 2,
		.column = {0xDE, 0xAD},
		.component_id = { 0x1, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
		.sub_channel = 8,
		.cme_threshold_ev_flags = 2,
		.cvme_count = { 14, 0, 0 },
		.sub_type = 0x5,
	},
};
465
466
/* UUID-prefixed Memory Module event, as it appears on the wire */
struct cxl_test_mem_module {
	uuid_t id;
	struct cxl_event_mem_module rec;
} __packed;

/* Sample Memory Module event: temperature change with degraded health info */
struct cxl_test_mem_module mem_module = {
	.id = CXL_EVENT_MEM_MODULE_UUID,
	.rec = {
		.hdr = {
			.length = sizeof(struct cxl_test_mem_module),
			/* .handle = Set dynamically */
			.related_handle = cpu_to_le16(0),
		},
		.event_type = CXL_MMER_TEMP_CHANGE,
		.info = {
			.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
			.media_status = CXL_DHI_MS_ALL_DATA_LOST,
			.add_status = (CXL_DHI_AS_CRITICAL << 2) |
				      (CXL_DHI_AS_WARNING << 4) |
				      (CXL_DHI_AS_WARNING << 5),
			.device_temp = { 0xDE, 0xAD},
			.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
			.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
		},
		/* .validity_flags = <set below> */
		.component_id = { 0x2, 0x74, 0xc5, 0x8, 0x9a, 0x1a, 0xb, 0xfc, 0xd2, 0x7e, 0x2f, 0x31, 0x9b, 0x3c, 0x81, 0x4d },
		.event_sub_type = 0x3,
	},
};
496
497
static int mock_set_timestamp(struct cxl_dev_state *cxlds,
498
struct cxl_mbox_cmd *cmd)
499
{
500
struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev);
501
struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in;
502
503
if (cmd->size_in != sizeof(*ts))
504
return -EINVAL;
505
506
if (cmd->size_out != 0)
507
return -EINVAL;
508
509
mdata->timestamp = le64_to_cpu(ts->timestamp);
510
return 0;
511
}
512
513
/*
 * Populate the mock event logs.  The FAIL log is deliberately over-filled
 * so the overflow path is exercised; the exact record order here is what
 * the cxl-test suite asserts against.
 */
static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
	/* Fill in the validity bits the static initializers left unset */
	put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK |
			   CXL_GMER_VALID_COMPONENT | CXL_GMER_VALID_COMPONENT_ID_FORMAT,
			   &gen_media.rec.media_hdr.validity_flags);

	put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
			   CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN | CXL_DER_VALID_SUB_CHANNEL |
			   CXL_DER_VALID_COMPONENT | CXL_DER_VALID_COMPONENT_ID_FORMAT,
			   &dram.rec.media_hdr.validity_flags);

	put_unaligned_le16(CXL_MMER_VALID_COMPONENT | CXL_MMER_VALID_COMPONENT_ID_FORMAT,
			   &mem_module.rec.validity_flags);

	mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_INFO,
		      (struct cxl_event_record_raw *)&mem_module);
	mes->ev_status |= CXLDEV_EVENT_STATUS_INFO;

	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&gen_media);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&mem_module);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL,
		      (struct cxl_event_record_raw *)&dram);
	/* Overflow this log */
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL;

	mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace);
	mes_add_event(mes, CXL_EVENT_TYPE_FATAL,
		      (struct cxl_event_record_raw *)&dram);
	mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL;
}
563
564
static int mock_gsl(struct cxl_mbox_cmd *cmd)
565
{
566
if (cmd->size_out < sizeof(mock_gsl_payload))
567
return -EINVAL;
568
569
memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload));
570
cmd->size_out = sizeof(mock_gsl_payload);
571
572
return 0;
573
}
574
575
static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd)
576
{
577
struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
578
struct cxl_mbox_get_log *gl = cmd->payload_in;
579
u32 offset = le32_to_cpu(gl->offset);
580
u32 length = le32_to_cpu(gl->length);
581
uuid_t uuid = DEFINE_CXL_CEL_UUID;
582
void *data = &mock_cel;
583
584
if (cmd->size_in < sizeof(*gl))
585
return -EINVAL;
586
if (length > cxl_mbox->payload_size)
587
return -EINVAL;
588
if (offset + length > sizeof(mock_cel))
589
return -EINVAL;
590
if (!uuid_equal(&gl->uuid, &uuid))
591
return -EINVAL;
592
if (length > cmd->size_out)
593
return -EINVAL;
594
595
memcpy(cmd->payload_out, data + offset, length);
596
597
return 0;
598
}
599
600
static int mock_rcd_id(struct cxl_mbox_cmd *cmd)
601
{
602
struct cxl_mbox_identify id = {
603
.fw_revision = { "mock fw v1 " },
604
.total_capacity =
605
cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
606
.volatile_capacity =
607
cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
608
};
609
610
if (cmd->size_out < sizeof(id))
611
return -EINVAL;
612
613
memcpy(cmd->payload_out, &id, sizeof(id));
614
615
return 0;
616
}
617
618
static int mock_id(struct cxl_mbox_cmd *cmd)
619
{
620
struct cxl_mbox_identify id = {
621
.fw_revision = { "mock fw v1 " },
622
.lsa_size = cpu_to_le32(LSA_SIZE),
623
.partition_align =
624
cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER),
625
.total_capacity =
626
cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER),
627
.inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX),
628
};
629
630
put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer);
631
632
if (cmd->size_out < sizeof(id))
633
return -EINVAL;
634
635
memcpy(cmd->payload_out, &id, sizeof(id));
636
637
return 0;
638
}
639
640
static int mock_partition_info(struct cxl_mbox_cmd *cmd)
641
{
642
struct cxl_mbox_get_partition_info pi = {
643
.active_volatile_cap =
644
cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
645
.active_persistent_cap =
646
cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER),
647
};
648
649
if (cmd->size_out < sizeof(pi))
650
return -EINVAL;
651
652
memcpy(cmd->payload_out, &pi, sizeof(pi));
653
654
return 0;
655
}
656
657
/*
 * Delayed-work handler simulating sanitize completion: notify any sysfs
 * waiter and clear the active flag.  Runs under the mailbox mutex to
 * serialize with mock_sanitize().
 */
void cxl_mockmem_sanitize_work(struct work_struct *work)
{
	struct cxl_memdev_state *mds =
		container_of(work, typeof(*mds), security.poll_dwork.work);
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;

	mutex_lock(&cxl_mbox->mbox_mutex);
	if (mds->security.sanitize_node)
		sysfs_notify_dirent(mds->security.sanitize_node);
	mds->security.sanitize_active = false;
	mutex_unlock(&cxl_mbox->mbox_mutex);

	dev_dbg(mds->cxlds.dev, "sanitize complete\n");
}
671
672
/*
 * Mock Sanitize: rejected while a user passphrase is set or the device is
 * locked; otherwise schedules cxl_mockmem_sanitize_work() to complete after
 * mdata->sanitize_timeout ms, emulating a background operation.
 */
static int mock_sanitize(struct cxl_mockmem_data *mdata,
			 struct cxl_mbox_cmd *cmd)
{
	struct cxl_memdev_state *mds = mdata->mds;
	struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox;
	int rc = 0;

	if (cmd->size_in != 0)
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}
	if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	/* Mutex guards sanitize_active against the completion handler */
	mutex_lock(&cxl_mbox->mbox_mutex);
	if (schedule_delayed_work(&mds->security.poll_dwork,
				  msecs_to_jiffies(mdata->sanitize_timeout))) {
		mds->security.sanitize_active = true;
		dev_dbg(mds->cxlds.dev, "sanitize issued\n");
	} else
		rc = -EBUSY;	/* a sanitize is already pending */
	mutex_unlock(&cxl_mbox->mbox_mutex);

	return rc;
}
705
706
static int mock_secure_erase(struct cxl_mockmem_data *mdata,
707
struct cxl_mbox_cmd *cmd)
708
{
709
if (cmd->size_in != 0)
710
return -EINVAL;
711
712
if (cmd->size_out != 0)
713
return -EINVAL;
714
715
if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
716
cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
717
return -ENXIO;
718
}
719
720
if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) {
721
cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
722
return -ENXIO;
723
}
724
725
return 0;
726
}
727
728
static int mock_get_security_state(struct cxl_mockmem_data *mdata,
729
struct cxl_mbox_cmd *cmd)
730
{
731
if (cmd->size_in)
732
return -EINVAL;
733
734
if (cmd->size_out != sizeof(u32))
735
return -EINVAL;
736
737
memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32));
738
739
return 0;
740
}
741
742
static void master_plimit_check(struct cxl_mockmem_data *mdata)
743
{
744
if (mdata->master_limit == PASS_TRY_LIMIT)
745
return;
746
mdata->master_limit++;
747
if (mdata->master_limit == PASS_TRY_LIMIT)
748
mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
749
}
750
751
static void user_plimit_check(struct cxl_mockmem_data *mdata)
752
{
753
if (mdata->user_limit == PASS_TRY_LIMIT)
754
return;
755
mdata->user_limit++;
756
if (mdata->user_limit == PASS_TRY_LIMIT)
757
mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
758
}
759
760
/*
 * Mock Set Passphrase: change the master or user passphrase after
 * verifying the old one.  Failed verifications count toward the
 * passphrase-attempt limit.
 */
static int mock_set_passphrase(struct cxl_mockmem_data *mdata,
			       struct cxl_mbox_cmd *cmd)
{
	struct cxl_set_pass *set_pass;

	if (cmd->size_in != sizeof(*set_pass))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	set_pass = cmd->payload_in;
	switch (set_pass->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		/*
		 * CXL spec rev3.0 8.2.9.8.6.2, The master passphrase shall only be set in
		 * the security disabled state when the user passphrase is not set.
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			master_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
		return 0;

	case CXL_PMEM_SEC_PASS_USER:
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
			cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
			return -ENXIO;
		}
		if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) {
			user_plimit_check(mdata);
			cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
			return -ENXIO;
		}
		memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN);
		mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET;
		return 0;

	default:
		cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
	}
	return -EINVAL;
}
819
820
static int mock_disable_passphrase(struct cxl_mockmem_data *mdata,
821
struct cxl_mbox_cmd *cmd)
822
{
823
struct cxl_disable_pass *dis_pass;
824
825
if (cmd->size_in != sizeof(*dis_pass))
826
return -EINVAL;
827
828
if (cmd->size_out != 0)
829
return -EINVAL;
830
831
if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
832
cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
833
return -ENXIO;
834
}
835
836
dis_pass = cmd->payload_in;
837
switch (dis_pass->type) {
838
case CXL_PMEM_SEC_PASS_MASTER:
839
if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) {
840
cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
841
return -ENXIO;
842
}
843
844
if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) {
845
cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
846
return -ENXIO;
847
}
848
849
if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) {
850
master_plimit_check(mdata);
851
cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
852
return -ENXIO;
853
}
854
855
mdata->master_limit = 0;
856
memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN);
857
mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET;
858
return 0;
859
860
case CXL_PMEM_SEC_PASS_USER:
861
if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
862
cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
863
return -ENXIO;
864
}
865
866
if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
867
cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
868
return -ENXIO;
869
}
870
871
if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
872
user_plimit_check(mdata);
873
cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
874
return -ENXIO;
875
}
876
877
mdata->user_limit = 0;
878
memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
879
mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET |
880
CXL_PMEM_SEC_STATE_LOCKED);
881
return 0;
882
883
default:
884
cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
885
return -EINVAL;
886
}
887
888
return 0;
889
}
890
891
static int mock_freeze_security(struct cxl_mockmem_data *mdata,
892
struct cxl_mbox_cmd *cmd)
893
{
894
if (cmd->size_in != 0)
895
return -EINVAL;
896
897
if (cmd->size_out != 0)
898
return -EINVAL;
899
900
if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN)
901
return 0;
902
903
mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN;
904
return 0;
905
}
906
907
/*
 * Mock Unlock: clear the LOCKED state when the supplied user passphrase
 * matches.  Only valid while unlocked-capable: not frozen, user passphrase
 * set, attempt limit not reached, and currently locked.
 */
static int mock_unlock_security(struct cxl_mockmem_data *mdata,
				struct cxl_mbox_cmd *cmd)
{
	if (cmd->size_in != NVDIMM_PASSPHRASE_LEN)
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) {
		/* Wrong passphrase: count it and latch PLIMIT at the cap */
		if (++mdata->user_limit == PASS_TRY_LIMIT)
			mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT;
		cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
		return -ENXIO;
	}

	mdata->user_limit = 0;
	mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
	return 0;
}
947
948
/*
 * Mock Passphrase Secure Erase: verify the master or user passphrase (when
 * set) and then behave as a secure erase, clearing user credentials and the
 * locked state as appropriate.
 */
static int mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata,
					struct cxl_mbox_cmd *cmd)
{
	struct cxl_pass_erase *erase;

	if (cmd->size_in != sizeof(*erase))
		return -EINVAL;

	if (cmd->size_out != 0)
		return -EINVAL;

	erase = cmd->payload_in;
	if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_USER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT &&
	    erase->type == CXL_PMEM_SEC_PASS_MASTER) {
		cmd->return_code = CXL_MBOX_CMD_RC_SECURITY;
		return -ENXIO;
	}

	switch (erase->type) {
	case CXL_PMEM_SEC_PASS_MASTER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a master passphrase is passed in while the master
		 * passphrase is not set and user passphrase is not set. The
		 * code will take the assumption that it will behave the same
		 * as a CXL secure erase command without passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) {
			if (memcmp(mdata->master_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				master_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->master_limit = 0;
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED;
		} else {
			/*
			 * CXL rev3 8.2.9.8.6.3 Disable Passphrase
			 * When master passphrase is disabled, the device shall
			 * return Invalid Input for the Passphrase Secure Erase
			 * command with master passphrase.
			 */
			return -EINVAL;
		}
		/* Scramble encryption keys so that data is effectively erased */
		break;
	case CXL_PMEM_SEC_PASS_USER:
		/*
		 * The spec does not clearly define the behavior of the scenario
		 * where a user passphrase is passed in while the user
		 * passphrase is not set. The code will take the assumption that
		 * it will behave the same as a CXL secure erase command without
		 * passphrase (0x4401).
		 */
		if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) {
			if (memcmp(mdata->user_pass, erase->pass,
				   NVDIMM_PASSPHRASE_LEN)) {
				user_plimit_check(mdata);
				cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE;
				return -ENXIO;
			}
			mdata->user_limit = 0;
			mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET;
			memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN);
		}

		/*
		 * CXL rev3 Table 8-118
		 * If user passphrase is not set or supported by device, current
		 * passphrase value is ignored. Will make the assumption that
		 * the operation will proceed as secure erase w/o passphrase
		 * since spec is not explicit.
		 */

		/* Scramble encryption keys so that data is effectively erased */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1045
1046
static int mock_get_lsa(struct cxl_mockmem_data *mdata,
1047
struct cxl_mbox_cmd *cmd)
1048
{
1049
struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in;
1050
void *lsa = mdata->lsa;
1051
u32 offset, length;
1052
1053
if (sizeof(*get_lsa) > cmd->size_in)
1054
return -EINVAL;
1055
offset = le32_to_cpu(get_lsa->offset);
1056
length = le32_to_cpu(get_lsa->length);
1057
if (offset + length > LSA_SIZE)
1058
return -EINVAL;
1059
if (length > cmd->size_out)
1060
return -EINVAL;
1061
1062
memcpy(cmd->payload_out, lsa + offset, length);
1063
return 0;
1064
}
1065
1066
static int mock_set_lsa(struct cxl_mockmem_data *mdata,
1067
struct cxl_mbox_cmd *cmd)
1068
{
1069
struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in;
1070
void *lsa = mdata->lsa;
1071
u32 offset, length;
1072
1073
if (sizeof(*set_lsa) > cmd->size_in)
1074
return -EINVAL;
1075
offset = le32_to_cpu(set_lsa->offset);
1076
length = cmd->size_in - sizeof(*set_lsa);
1077
if (offset + length > LSA_SIZE)
1078
return -EINVAL;
1079
1080
memcpy(lsa + offset, &set_lsa->data[0], length);
1081
return 0;
1082
}
1083
1084
static int mock_health_info(struct cxl_mbox_cmd *cmd)
1085
{
1086
struct cxl_mbox_health_info health_info = {
1087
/* set flags for maint needed, perf degraded, hw replacement */
1088
.health_status = 0x7,
1089
/* set media status to "All Data Lost" */
1090
.media_status = 0x3,
1091
/*
1092
* set ext_status flags for:
1093
* ext_life_used: normal,
1094
* ext_temperature: critical,
1095
* ext_corrected_volatile: warning,
1096
* ext_corrected_persistent: normal,
1097
*/
1098
.ext_status = 0x18,
1099
.life_used = 15,
1100
.temperature = cpu_to_le16(25),
1101
.dirty_shutdowns = cpu_to_le32(10),
1102
.volatile_errors = cpu_to_le32(20),
1103
.pmem_errors = cpu_to_le32(30),
1104
};
1105
1106
if (cmd->size_out < sizeof(health_info))
1107
return -EINVAL;
1108
1109
memcpy(cmd->payload_out, &health_info, sizeof(health_info));
1110
return 0;
1111
}
1112
1113
static int mock_set_shutdown_state(struct cxl_mockmem_data *mdata,
1114
struct cxl_mbox_cmd *cmd)
1115
{
1116
struct cxl_mbox_set_shutdown_state_in *ss = cmd->payload_in;
1117
1118
if (cmd->size_in != sizeof(*ss))
1119
return -EINVAL;
1120
1121
if (cmd->size_out != 0)
1122
return -EINVAL;
1123
1124
mdata->shutdown_state = ss->state;
1125
return 0;
1126
}
1127
1128
/*
 * Global table of mock-injected poison records, shared by all mock
 * devices. A slot is in use when ->cxlds is non-NULL; each record
 * covers a single DPA on that device.
 */
static struct mock_poison {
	struct cxl_dev_state *cxlds;
	u64 dpa;
} mock_poison_list[MOCK_INJECT_TEST_MAX];
1132
1133
static struct cxl_mbox_poison_out *
1134
cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length)
1135
{
1136
struct cxl_mbox_poison_out *po;
1137
int nr_records = 0;
1138
u64 dpa;
1139
1140
po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL);
1141
if (!po)
1142
return NULL;
1143
1144
for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1145
if (mock_poison_list[i].cxlds != cxlds)
1146
continue;
1147
if (mock_poison_list[i].dpa < offset ||
1148
mock_poison_list[i].dpa > offset + length - 1)
1149
continue;
1150
1151
dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED;
1152
po->record[nr_records].address = cpu_to_le64(dpa);
1153
po->record[nr_records].length = cpu_to_le32(1);
1154
nr_records++;
1155
if (nr_records == poison_inject_dev_max)
1156
break;
1157
}
1158
1159
/* Always return count, even when zero */
1160
po->count = cpu_to_le16(nr_records);
1161
1162
return po;
1163
}
1164
1165
static int mock_get_poison(struct cxl_dev_state *cxlds,
1166
struct cxl_mbox_cmd *cmd)
1167
{
1168
struct cxl_mbox_poison_in *pi = cmd->payload_in;
1169
struct cxl_mbox_poison_out *po;
1170
u64 offset = le64_to_cpu(pi->offset);
1171
u64 length = le64_to_cpu(pi->length);
1172
int nr_records;
1173
1174
po = cxl_get_injected_po(cxlds, offset, length);
1175
if (!po)
1176
return -ENOMEM;
1177
nr_records = le16_to_cpu(po->count);
1178
memcpy(cmd->payload_out, po, struct_size(po, record, nr_records));
1179
cmd->size_out = struct_size(po, record, nr_records);
1180
kfree(po);
1181
1182
return 0;
1183
}
1184
1185
static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds)
1186
{
1187
int count = 0;
1188
1189
for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1190
if (mock_poison_list[i].cxlds == cxlds)
1191
count++;
1192
}
1193
return (count >= poison_inject_dev_max);
1194
}
1195
1196
/*
 * Record a new injected-poison entry for @cxlds at @dpa.
 *
 * Returns 0 on success, -EBUSY when the per-device limit is reached
 * (matching the CXL driver's handling), or -ENXIO when the global mock
 * table is full.
 */
static int mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa)
{
	/* Return EBUSY to match the CXL driver handling */
	if (mock_poison_dev_max_injected(cxlds)) {
		dev_dbg(cxlds->dev,
			"Device poison injection limit has been reached: %d\n",
			poison_inject_dev_max);
		return -EBUSY;
	}

	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
		struct mock_poison *ent = &mock_poison_list[i];

		if (ent->cxlds)
			continue;
		ent->cxlds = cxlds;
		ent->dpa = dpa;
		return 0;
	}

	dev_dbg(cxlds->dev,
		"Mock test poison injection limit has been reached: %d\n",
		MOCK_INJECT_TEST_MAX);

	return -ENXIO;
}
1219
1220
/* Is @dpa already present in the injected-poison table for @cxlds? */
static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa)
{
	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
		struct mock_poison *ent = &mock_poison_list[i];

		if (ent->cxlds == cxlds && ent->dpa == dpa)
			return true;
	}

	return false;
}
1229
1230
static int mock_inject_poison(struct cxl_dev_state *cxlds,
1231
struct cxl_mbox_cmd *cmd)
1232
{
1233
struct cxl_mbox_inject_poison *pi = cmd->payload_in;
1234
u64 dpa = le64_to_cpu(pi->address);
1235
1236
if (mock_poison_found(cxlds, dpa)) {
1237
/* Not an error to inject poison if already poisoned */
1238
dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa);
1239
return 0;
1240
}
1241
1242
return mock_poison_add(cxlds, dpa);
1243
}
1244
1245
/*
 * Remove @dpa from the injected-poison table for @cxlds.
 * Returns true when an entry was found and cleared.
 */
static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa)
{
	for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
		struct mock_poison *ent = &mock_poison_list[i];

		if (ent->cxlds == cxlds && ent->dpa == dpa) {
			ent->cxlds = NULL;
			return true;
		}
	}

	return false;
}
1256
1257
static int mock_clear_poison(struct cxl_dev_state *cxlds,
1258
struct cxl_mbox_cmd *cmd)
1259
{
1260
struct cxl_mbox_clear_poison *pi = cmd->payload_in;
1261
u64 dpa = le64_to_cpu(pi->address);
1262
1263
/*
1264
* A real CXL device will write pi->write_data to the address
1265
* being cleared. In this mock, just delete this address from
1266
* the mock poison list.
1267
*/
1268
if (!mock_poison_del(cxlds, dpa))
1269
dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa);
1270
1271
return 0;
1272
}
1273
1274
static bool mock_poison_list_empty(void)
1275
{
1276
for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) {
1277
if (mock_poison_list[i].cxlds)
1278
return false;
1279
}
1280
return true;
1281
}
1282
1283
/* sysfs: report the current per-device poison injection limit. */
static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf)
{
	return sysfs_emit(buf, "%u\n", poison_inject_dev_max);
}
1287
1288
/*
 * sysfs: set the per-device poison injection limit.
 *
 * Only allowed while no poison is currently injected (the limit also
 * sizes the Get Poison List reply buffer). Parse as unsigned so a
 * negative input cannot wrap into a huge limit when stored in the
 * unsigned poison_inject_dev_max.
 */
static ssize_t poison_inject_max_store(struct device_driver *drv,
				       const char *buf, size_t len)
{
	unsigned int val;

	if (kstrtouint(buf, 0, &val) < 0)
		return -EINVAL;

	if (!mock_poison_list_empty())
		return -EBUSY;

	if (val > MOCK_INJECT_TEST_MAX)
		return -EINVAL;

	poison_inject_dev_max = val;

	return len;
}
1306
1307
static DRIVER_ATTR_RW(poison_inject_max);

/* Driver-level sysfs attributes (poison_inject_max knob). */
static struct attribute *cxl_mock_mem_core_attrs[] = {
	&driver_attr_poison_inject_max.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem_core);
1314
1315
static int mock_fw_info(struct cxl_mockmem_data *mdata,
1316
struct cxl_mbox_cmd *cmd)
1317
{
1318
struct cxl_mbox_get_fw_info fw_info = {
1319
.num_slots = FW_SLOTS,
1320
.slot_info = (mdata->fw_slot & 0x7) |
1321
((mdata->fw_staged & 0x7) << 3),
1322
.activation_cap = 0,
1323
};
1324
1325
strcpy(fw_info.slot_1_revision, "cxl_test_fw_001");
1326
strcpy(fw_info.slot_2_revision, "cxl_test_fw_002");
1327
strcpy(fw_info.slot_3_revision, "cxl_test_fw_003");
1328
strcpy(fw_info.slot_4_revision, "");
1329
1330
if (cmd->size_out < sizeof(fw_info))
1331
return -EINVAL;
1332
1333
memcpy(cmd->payload_out, &fw_info, sizeof(fw_info));
1334
return 0;
1335
}
1336
1337
static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
1338
struct cxl_mbox_cmd *cmd)
1339
{
1340
struct cxl_mbox_transfer_fw *transfer = cmd->payload_in;
1341
void *fw = mdata->fw;
1342
size_t offset, length;
1343
1344
offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT;
1345
length = cmd->size_in - sizeof(*transfer);
1346
if (offset + length > FW_SIZE)
1347
return -EINVAL;
1348
1349
switch (transfer->action) {
1350
case CXL_FW_TRANSFER_ACTION_FULL:
1351
if (offset != 0)
1352
return -EINVAL;
1353
fallthrough;
1354
case CXL_FW_TRANSFER_ACTION_END:
1355
if (transfer->slot == 0 || transfer->slot > FW_SLOTS)
1356
return -EINVAL;
1357
mdata->fw_size = offset + length;
1358
break;
1359
case CXL_FW_TRANSFER_ACTION_INITIATE:
1360
case CXL_FW_TRANSFER_ACTION_CONTINUE:
1361
break;
1362
case CXL_FW_TRANSFER_ACTION_ABORT:
1363
return 0;
1364
default:
1365
return -EINVAL;
1366
}
1367
1368
memcpy(fw + offset, transfer->data, length);
1369
usleep_range(1500, 2000);
1370
return 0;
1371
}
1372
1373
static int mock_activate_fw(struct cxl_mockmem_data *mdata,
1374
struct cxl_mbox_cmd *cmd)
1375
{
1376
struct cxl_mbox_activate_fw *activate = cmd->payload_in;
1377
1378
if (activate->slot == 0 || activate->slot > FW_SLOTS)
1379
return -EINVAL;
1380
1381
switch (activate->action) {
1382
case CXL_FW_ACTIVATE_ONLINE:
1383
mdata->fw_slot = activate->slot;
1384
mdata->fw_staged = 0;
1385
return 0;
1386
case CXL_FW_ACTIVATE_OFFLINE:
1387
mdata->fw_staged = activate->slot;
1388
return 0;
1389
}
1390
1391
return -EINVAL;
1392
}
1393
1394
/* All-ones UUID identifying the mock's single vendor-specific feature */
#define CXL_VENDOR_FEATURE_TEST \
	UUID_INIT(0xffffffff, 0xffff, 0xffff, 0xff, 0xff, 0xff, 0xff, 0xff, \
		  0xff, 0xff, 0xff)
1397
1398
static void fill_feature_vendor_test(struct cxl_feat_entry *feat)
1399
{
1400
feat->uuid = CXL_VENDOR_FEATURE_TEST;
1401
feat->id = 0;
1402
feat->get_feat_size = cpu_to_le16(0x4);
1403
feat->set_feat_size = cpu_to_le16(0x4);
1404
feat->flags = cpu_to_le32(CXL_FEATURE_F_CHANGEABLE |
1405
CXL_FEATURE_F_DEFAULT_SEL |
1406
CXL_FEATURE_F_SAVED_SEL);
1407
feat->get_feat_ver = 1;
1408
feat->set_feat_ver = 1;
1409
feat->effects = cpu_to_le16(CXL_CMD_CONFIG_CHANGE_COLD_RESET |
1410
CXL_CMD_EFFECTS_VALID);
1411
}
1412
1413
/* Number of features the mock advertises via Get Supported Features */
#define MAX_CXL_TEST_FEATS 1
1414
1415
static int mock_get_test_feature(struct cxl_mockmem_data *mdata,
1416
struct cxl_mbox_cmd *cmd)
1417
{
1418
struct vendor_test_feat *output = cmd->payload_out;
1419
struct cxl_mbox_get_feat_in *input = cmd->payload_in;
1420
u16 offset = le16_to_cpu(input->offset);
1421
u16 count = le16_to_cpu(input->count);
1422
u8 *ptr;
1423
1424
if (offset > sizeof(*output)) {
1425
cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1426
return -EINVAL;
1427
}
1428
1429
if (offset + count > sizeof(*output)) {
1430
cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1431
return -EINVAL;
1432
}
1433
1434
ptr = (u8 *)&mdata->test_feat + offset;
1435
memcpy((u8 *)output + offset, ptr, count);
1436
1437
return 0;
1438
}
1439
1440
static int mock_get_feature(struct cxl_mockmem_data *mdata,
1441
struct cxl_mbox_cmd *cmd)
1442
{
1443
struct cxl_mbox_get_feat_in *input = cmd->payload_in;
1444
1445
if (uuid_equal(&input->uuid, &CXL_VENDOR_FEATURE_TEST))
1446
return mock_get_test_feature(mdata, cmd);
1447
1448
cmd->return_code = CXL_MBOX_CMD_RC_UNSUPPORTED;
1449
1450
return -EOPNOTSUPP;
1451
}
1452
1453
static int mock_set_test_feature(struct cxl_mockmem_data *mdata,
1454
struct cxl_mbox_cmd *cmd)
1455
{
1456
struct cxl_mbox_set_feat_in *input = cmd->payload_in;
1457
struct vendor_test_feat *test =
1458
(struct vendor_test_feat *)input->feat_data;
1459
u32 action;
1460
1461
action = FIELD_GET(CXL_SET_FEAT_FLAG_DATA_TRANSFER_MASK,
1462
le32_to_cpu(input->hdr.flags));
1463
/*
1464
* While it is spec compliant to support other set actions, it is not
1465
* necessary to add the complication in the emulation currently. Reject
1466
* anything besides full xfer.
1467
*/
1468
if (action != CXL_SET_FEAT_FLAG_FULL_DATA_TRANSFER) {
1469
cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1470
return -EINVAL;
1471
}
1472
1473
/* Offset should be reserved when doing full transfer */
1474
if (input->hdr.offset) {
1475
cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1476
return -EINVAL;
1477
}
1478
1479
memcpy(&mdata->test_feat.data, &test->data, sizeof(u32));
1480
1481
return 0;
1482
}
1483
1484
static int mock_set_feature(struct cxl_mockmem_data *mdata,
1485
struct cxl_mbox_cmd *cmd)
1486
{
1487
struct cxl_mbox_set_feat_in *input = cmd->payload_in;
1488
1489
if (uuid_equal(&input->hdr.uuid, &CXL_VENDOR_FEATURE_TEST))
1490
return mock_set_test_feature(mdata, cmd);
1491
1492
cmd->return_code = CXL_MBOX_CMD_RC_UNSUPPORTED;
1493
1494
return -EOPNOTSUPP;
1495
}
1496
1497
static int mock_get_supported_features(struct cxl_mockmem_data *mdata,
1498
struct cxl_mbox_cmd *cmd)
1499
{
1500
struct cxl_mbox_get_sup_feats_in *in = cmd->payload_in;
1501
struct cxl_mbox_get_sup_feats_out *out = cmd->payload_out;
1502
struct cxl_feat_entry *feat;
1503
u16 start_idx, count;
1504
1505
if (cmd->size_out < sizeof(*out)) {
1506
cmd->return_code = CXL_MBOX_CMD_RC_PAYLOADLEN;
1507
return -EINVAL;
1508
}
1509
1510
/*
1511
* Current emulation only supports 1 feature
1512
*/
1513
start_idx = le16_to_cpu(in->start_idx);
1514
if (start_idx != 0) {
1515
cmd->return_code = CXL_MBOX_CMD_RC_INPUT;
1516
return -EINVAL;
1517
}
1518
1519
count = le16_to_cpu(in->count);
1520
if (count < struct_size(out, ents, 0)) {
1521
cmd->return_code = CXL_MBOX_CMD_RC_PAYLOADLEN;
1522
return -EINVAL;
1523
}
1524
1525
out->supported_feats = cpu_to_le16(MAX_CXL_TEST_FEATS);
1526
cmd->return_code = 0;
1527
if (count < struct_size(out, ents, MAX_CXL_TEST_FEATS)) {
1528
out->num_entries = 0;
1529
return 0;
1530
}
1531
1532
out->num_entries = cpu_to_le16(MAX_CXL_TEST_FEATS);
1533
feat = out->ents;
1534
fill_feature_vendor_test(feat);
1535
1536
return 0;
1537
}
1538
1539
/*
 * Mailbox transport entry point for the mock device: dispatch each CXL
 * mailbox opcode to its mock handler. Unhandled opcodes fall through
 * and return the -EIO default.
 */
static int cxl_mock_mbox_send(struct cxl_mailbox *cxl_mbox,
			      struct cxl_mbox_cmd *cmd)
{
	struct device *dev = cxl_mbox->host;
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	struct cxl_memdev_state *mds = mdata->mds;
	struct cxl_dev_state *cxlds = &mds->cxlds;
	int rc = -EIO;

	switch (cmd->opcode) {
	case CXL_MBOX_OP_SET_TIMESTAMP:
		rc = mock_set_timestamp(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_SUPPORTED_LOGS:
		rc = mock_gsl(cmd);
		break;
	case CXL_MBOX_OP_GET_LOG:
		rc = mock_get_log(mds, cmd);
		break;
	case CXL_MBOX_OP_IDENTIFY:
		/* RCD (restricted CXL device) variants identify differently */
		if (cxlds->rcd)
			rc = mock_rcd_id(cmd);
		else
			rc = mock_id(cmd);
		break;
	case CXL_MBOX_OP_GET_LSA:
		rc = mock_get_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_PARTITION_INFO:
		rc = mock_partition_info(cmd);
		break;
	case CXL_MBOX_OP_GET_EVENT_RECORD:
		rc = mock_get_event(dev, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_EVENT_RECORD:
		rc = mock_clear_event(dev, cmd);
		break;
	case CXL_MBOX_OP_SET_LSA:
		rc = mock_set_lsa(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_HEALTH_INFO:
		rc = mock_health_info(cmd);
		break;
	case CXL_MBOX_OP_SANITIZE:
		rc = mock_sanitize(mdata, cmd);
		break;
	case CXL_MBOX_OP_SECURE_ERASE:
		rc = mock_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_SECURITY_STATE:
		rc = mock_get_security_state(mdata, cmd);
		break;
	case CXL_MBOX_OP_SET_PASSPHRASE:
		rc = mock_set_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_DISABLE_PASSPHRASE:
		rc = mock_disable_passphrase(mdata, cmd);
		break;
	case CXL_MBOX_OP_FREEZE_SECURITY:
		rc = mock_freeze_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_UNLOCK:
		rc = mock_unlock_security(mdata, cmd);
		break;
	case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE:
		rc = mock_passphrase_secure_erase(mdata, cmd);
		break;
	case CXL_MBOX_OP_SET_SHUTDOWN_STATE:
		rc = mock_set_shutdown_state(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_POISON:
		rc = mock_get_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_INJECT_POISON:
		rc = mock_inject_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_CLEAR_POISON:
		rc = mock_clear_poison(cxlds, cmd);
		break;
	case CXL_MBOX_OP_GET_FW_INFO:
		rc = mock_fw_info(mdata, cmd);
		break;
	case CXL_MBOX_OP_TRANSFER_FW:
		rc = mock_transfer_fw(mdata, cmd);
		break;
	case CXL_MBOX_OP_ACTIVATE_FW:
		rc = mock_activate_fw(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_SUPPORTED_FEATURES:
		rc = mock_get_supported_features(mdata, cmd);
		break;
	case CXL_MBOX_OP_GET_FEATURE:
		rc = mock_get_feature(mdata, cmd);
		break;
	case CXL_MBOX_OP_SET_FEATURE:
		rc = mock_set_feature(mdata, cmd);
		break;
	default:
		break;
	}

	dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode,
		cmd->size_in, cmd->size_out, rc);

	return rc;
}
1645
1646
/* devm action: free the vmalloc()'d label storage area on teardown. */
static void label_area_release(void *lsa)
{
	vfree(lsa);
}
1650
1651
/* devm action: free the vmalloc()'d firmware buffer on teardown. */
static void fw_buf_release(void *buf)
{
	vfree(buf);
}
1655
1656
static bool is_rcd(struct platform_device *pdev)
1657
{
1658
const struct platform_device_id *id = platform_get_device_id(pdev);
1659
1660
return !!id->driver_data;
1661
}
1662
1663
/* sysfs: any write fires the mock event-log trigger for this device. */
static ssize_t event_trigger_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	cxl_mock_event_trigger(dev);
	return count;
}
static DEVICE_ATTR_WO(event_trigger);
1671
1672
static int cxl_mock_mailbox_create(struct cxl_dev_state *cxlds)
1673
{
1674
int rc;
1675
1676
rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev);
1677
if (rc)
1678
return rc;
1679
1680
return 0;
1681
}
1682
1683
/* Seed the vendor test feature with a recognizable initial value. */
static void cxl_mock_test_feat_init(struct cxl_mockmem_data *mdata)
{
	mdata->test_feat.data = cpu_to_le32(0xdeadbeef);
}
1687
1688
/*
 * Probe a mock CXL memory platform device: allocate the emulated LSA
 * and firmware buffers, wire up the mock mailbox, run the standard
 * driver enumeration sequence against it, and register the memdev.
 * The ordering below mirrors the real cxl_pci probe flow.
 */
static int cxl_mock_mem_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cxl_memdev *cxlmd;
	struct cxl_memdev_state *mds;
	struct cxl_dev_state *cxlds;
	struct cxl_mockmem_data *mdata;
	struct cxl_mailbox *cxl_mbox;
	struct cxl_dpa_info range_info = { 0 };
	int rc;

	mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL);
	if (!mdata)
		return -ENOMEM;
	dev_set_drvdata(dev, mdata);

	/* Backing stores for the emulated LSA and firmware image */
	mdata->lsa = vmalloc(LSA_SIZE);
	if (!mdata->lsa)
		return -ENOMEM;
	mdata->fw = vmalloc(FW_SIZE);
	if (!mdata->fw)
		return -ENOMEM;
	/* start with firmware slot 2 active */
	mdata->fw_slot = 2;

	rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa);
	if (rc)
		return rc;

	rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw);
	if (rc)
		return rc;

	mds = cxl_memdev_state_create(dev);
	if (IS_ERR(mds))
		return PTR_ERR(mds);

	cxlds = &mds->cxlds;
	rc = cxl_mock_mailbox_create(cxlds);
	if (rc)
		return rc;

	/* Route all mailbox traffic through the mock dispatcher */
	cxl_mbox = &mds->cxlds.cxl_mbox;
	mdata->mds = mds;
	cxl_mbox->mbox_send = cxl_mock_mbox_send;
	cxl_mbox->payload_size = SZ_4K;
	mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
	INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);

	cxlds->serial = pdev->id + 1;
	if (is_rcd(pdev))
		cxlds->rcd = true;

	rc = cxl_enumerate_cmds(mds);
	if (rc)
		return rc;

	rc = cxl_poison_state_init(mds);
	if (rc)
		return rc;

	rc = cxl_set_timestamp(mds);
	if (rc)
		return rc;

	cxlds->media_ready = true;
	rc = cxl_dev_state_identify(mds);
	if (rc)
		return rc;

	rc = cxl_mem_dpa_fetch(mds, &range_info);
	if (rc)
		return rc;

	rc = cxl_dpa_setup(cxlds, &range_info);
	if (rc)
		return rc;

	/* Features are optional; probe continues without them */
	rc = devm_cxl_setup_features(cxlds);
	if (rc)
		dev_dbg(dev, "No CXL Features discovered\n");

	cxl_mock_add_event_logs(&mdata->mes);

	cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
	if (rc)
		return rc;

	rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
	if (rc)
		return rc;

	/* fwctl is also optional */
	rc = devm_cxl_setup_fwctl(&pdev->dev, cxlmd);
	if (rc)
		dev_dbg(dev, "No CXL FWCTL setup\n");

	cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL);
	cxl_mock_test_feat_init(mdata);

	return 0;
}
1792
1793
static ssize_t security_lock_show(struct device *dev,
1794
struct device_attribute *attr, char *buf)
1795
{
1796
struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
1797
1798
return sysfs_emit(buf, "%u\n",
1799
!!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED));
1800
}
1801
1802
/*
 * sysfs: writing "1" locks the mock device. Locking requires a user
 * passphrase to be set, and clears the frozen and passphrase-limit
 * bits. Any other value is rejected.
 */
static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	u32 clear = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT |
		    CXL_PMEM_SEC_STATE_MASTER_PLIMIT;
	int val;

	if (kstrtoint(buf, 0, &val) < 0)
		return -EINVAL;

	if (val != 1)
		return -EINVAL;

	if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
		return -ENXIO;

	mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED;
	mdata->security_state &= ~clear;

	return count;
}

static DEVICE_ATTR_RW(security_lock);
1825
1826
/*
 * sysfs: SHA-256 of the received firmware image (fw_size bytes), so
 * tests can verify a transfer arrived intact.
 */
static ssize_t fw_buf_checksum_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	u8 hash[SHA256_DIGEST_SIZE];

	sha256(mdata->fw, mdata->fw_size, hash);

	return sysfs_emit(buf, "%*phN\n", SHA256_DIGEST_SIZE, hash);
}

static DEVICE_ATTR_RO(fw_buf_checksum);
1838
1839
/* sysfs: report the emulated sanitize duration (seconds). */
static ssize_t sanitize_timeout_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout);
}
1846
1847
/* sysfs: set the emulated sanitize duration (seconds). */
static ssize_t sanitize_timeout_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
	unsigned long val;
	int rc = kstrtoul(buf, 0, &val);

	if (rc)
		return rc;

	mdata->sanitize_timeout = val;

	return count;
}

static DEVICE_ATTR_RW(sanitize_timeout);
1865
1866
/* Per-device sysfs attributes exposed by the mock memdev. */
static struct attribute *cxl_mock_mem_attrs[] = {
	&dev_attr_security_lock.attr,
	&dev_attr_event_trigger.attr,
	&dev_attr_fw_buf_checksum.attr,
	&dev_attr_sanitize_timeout.attr,
	NULL
};
ATTRIBUTE_GROUPS(cxl_mock_mem);
1874
1875
/* Two device flavors: plain memdev, and RCD (driver_data == 1). */
static const struct platform_device_id cxl_mock_mem_ids[] = {
	{ .name = "cxl_mem", 0 },
	{ .name = "cxl_rcd", 1 },
	{ },
};
MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids);
1881
1882
/* Platform driver binding the mock memdev variants. */
static struct platform_driver cxl_mock_mem_driver = {
	.probe = cxl_mock_mem_probe,
	.id_table = cxl_mock_mem_ids,
	.driver = {
		.name = KBUILD_MODNAME,
		.dev_groups = cxl_mock_mem_groups,
		.groups = cxl_mock_mem_core_groups,
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
1892
1893
module_platform_driver(cxl_mock_mem_driver);
1894
MODULE_LICENSE("GPL v2");
1895
MODULE_DESCRIPTION("cxl_test: mem device mock module");
1896
MODULE_IMPORT_NS("CXL");
1897
1898