GitHub Repository: torvalds/linux
Path: blob/master/drivers/accel/amdxdna/aie2_message.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2023-2024, Advanced Micro Devices, Inc.
 */

#include <drm/amdxdna_accel.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <linux/bitfield.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/xarray.h>

#include "aie2_msg_priv.h"
#include "aie2_pci.h"
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_mailbox.h"
#include "amdxdna_mailbox_helper.h"
#include "amdxdna_pci_drv.h"

#define DECLARE_AIE2_MSG(name, op) \
        DECLARE_XDNA_MSG_COMMON(name, op, MAX_AIE2_STATUS_CODE)

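/*
 * Send a request to firmware over the management mailbox channel and wait
 * for the response. A timeout (-ETIME) is treated as fatal: the management
 * channel is stopped and destroyed so subsequent callers fail fast with
 * -ENODEV. A reply whose status word is not AIE2_STATUS_SUCCESS is
 * reported as -EINVAL.
 */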
static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
                                   struct xdna_mailbox_msg *msg)
{
        struct amdxdna_dev *xdna = ndev->xdna;
        struct xdna_notify *hdl = msg->handle;
        int ret;

        if (!ndev->mgmt_chann)
                return -ENODEV;

        drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
        ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg);
        if (ret == -ETIME) {
                xdna_mailbox_stop_channel(ndev->mgmt_chann);
                xdna_mailbox_destroy_channel(ndev->mgmt_chann);
                ndev->mgmt_chann = NULL;
        }

        if (!ret && *hdl->data != AIE2_STATUS_SUCCESS) {
                XDNA_ERR(xdna, "command opcode 0x%x failed, status 0x%x",
                         msg->opcode, *hdl->data);
                ret = -EINVAL;
        }

        return ret;
}

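/*
 * Firmware suspend/resume. Resume reuses the "suspend" message layout
 * declared by DECLARE_AIE2_MSG; only the opcode differs between the two
 * operations.
 */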
int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev)
{
        DECLARE_AIE2_MSG(suspend, MSG_OP_SUSPEND);

        return aie2_send_mgmt_msg_wait(ndev, &msg);
}

int aie2_resume_fw(struct amdxdna_dev_hdl *ndev)
{
        DECLARE_AIE2_MSG(suspend, MSG_OP_RESUME);

        return aie2_send_mgmt_msg_wait(ndev, &msg);
}

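/*
 * Set or query a typed runtime configuration value in firmware. The
 * request carries a (type, value) pair; for the getter, the value comes
 * back in the response.
 */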
int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value)
{
        DECLARE_AIE2_MSG(set_runtime_cfg, MSG_OP_SET_RUNTIME_CONFIG);
        int ret;

        req.type = type;
        req.value = value;

        ret = aie2_send_mgmt_msg_wait(ndev, &msg);
        if (ret) {
                XDNA_ERR(ndev->xdna, "Failed to set runtime config, ret %d", ret);
                return ret;
        }

        return 0;
}

int aie2_get_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 *value)
{
        DECLARE_AIE2_MSG(get_runtime_cfg, MSG_OP_GET_RUNTIME_CONFIG);
        int ret;

        req.type = type;
        ret = aie2_send_mgmt_msg_wait(ndev, &msg);
        if (ret) {
                XDNA_ERR(ndev->xdna, "Failed to get runtime config, ret %d", ret);
                return ret;
        }

        *value = resp.value;
        return 0;
}

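/* Tell firmware which PASID to associate with management operations. */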
int aie2_assign_mgmt_pasid(struct amdxdna_dev_hdl *ndev, u16 pasid)
{
        DECLARE_AIE2_MSG(assign_mgmt_pasid, MSG_OP_ASSIGN_MGMT_PASID);

        req.pasid = pasid;

        return aie2_send_mgmt_msg_wait(ndev, &msg);
}

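/* Read back the AIE array version (major/minor) from firmware. */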
int aie2_query_aie_version(struct amdxdna_dev_hdl *ndev, struct aie_version *version)
{
        DECLARE_AIE2_MSG(aie_version_info, MSG_OP_QUERY_AIE_VERSION);
        struct amdxdna_dev *xdna = ndev->xdna;
        int ret;

        ret = aie2_send_mgmt_msg_wait(ndev, &msg);
        if (ret)
                return ret;

        XDNA_DBG(xdna, "Query AIE version - major: %u minor: %u completed",
                 resp.major, resp.minor);

        version->major = resp.major;
        version->minor = resp.minor;

        return 0;
}

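/*
 * Query the AIE tile layout and unpack it into struct aie_metadata:
 * overall geometry (size, columns, rows), the array version, and the
 * per-row-type core/mem/shim counts for DMA channels, locks and event
 * registers.
 */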
int aie2_query_aie_metadata(struct amdxdna_dev_hdl *ndev, struct aie_metadata *metadata)
{
        DECLARE_AIE2_MSG(aie_tile_info, MSG_OP_QUERY_AIE_TILE_INFO);
        int ret;

        ret = aie2_send_mgmt_msg_wait(ndev, &msg);
        if (ret)
                return ret;

        metadata->size = resp.info.size;
        metadata->cols = resp.info.cols;
        metadata->rows = resp.info.rows;

        metadata->version.major = resp.info.major;
        metadata->version.minor = resp.info.minor;

        metadata->core.row_count = resp.info.core_rows;
        metadata->core.row_start = resp.info.core_row_start;
        metadata->core.dma_channel_count = resp.info.core_dma_channels;
        metadata->core.lock_count = resp.info.core_locks;
        metadata->core.event_reg_count = resp.info.core_events;

        metadata->mem.row_count = resp.info.mem_rows;
        metadata->mem.row_start = resp.info.mem_row_start;
        metadata->mem.dma_channel_count = resp.info.mem_dma_channels;
        metadata->mem.lock_count = resp.info.mem_locks;
        metadata->mem.event_reg_count = resp.info.mem_events;

        metadata->shim.row_count = resp.info.shim_rows;
        metadata->shim.row_start = resp.info.shim_row_start;
        metadata->shim.dma_channel_count = resp.info.shim_dma_channels;
        metadata->shim.lock_count = resp.info.shim_locks;
        metadata->shim.event_reg_count = resp.info.shim_events;

        return 0;
}

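/* Read the firmware version: major.minor.sub plus a build number. */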
int aie2_query_firmware_version(struct amdxdna_dev_hdl *ndev,
                                struct amdxdna_fw_ver *fw_ver)
{
        DECLARE_AIE2_MSG(firmware_version, MSG_OP_GET_FIRMWARE_VERSION);
        int ret;

        ret = aie2_send_mgmt_msg_wait(ndev, &msg);
        if (ret)
                return ret;

        fw_ver->major = resp.major;
        fw_ver->minor = resp.minor;
        fw_ver->sub = resp.sub;
        fw_ver->build = resp.build;

        return 0;
}

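/*
 * Create a firmware context for a hardware context. The response carries
 * the firmware context id, one x2i/i2x circular-queue pair and an MSI-X
 * index; a per-context mailbox channel is created on top of those
 * resources. The interrupt register is taken to be the word immediately
 * after the i2x head pointer register.
 */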
int aie2_create_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx)
{
        DECLARE_AIE2_MSG(create_ctx, MSG_OP_CREATE_CONTEXT);
        struct amdxdna_dev *xdna = ndev->xdna;
        struct xdna_mailbox_chann_res x2i;
        struct xdna_mailbox_chann_res i2x;
        struct cq_pair *cq_pair;
        u32 intr_reg;
        int ret;

        req.aie_type = 1;
        req.start_col = hwctx->start_col;
        req.num_col = hwctx->num_col;
        req.num_cq_pairs_requested = 1;
        req.pasid = hwctx->client->pasid;
        req.context_priority = 2;

        ret = aie2_send_mgmt_msg_wait(ndev, &msg);
        if (ret)
                return ret;

        hwctx->fw_ctx_id = resp.context_id;
        WARN_ONCE(hwctx->fw_ctx_id == -1, "Unexpected context id");

        cq_pair = &resp.cq_pair[0];
        x2i.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.head_addr);
        x2i.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->x2i_q.tail_addr);
        x2i.rb_start_addr = AIE2_SRAM_OFF(ndev, cq_pair->x2i_q.buf_addr);
        x2i.rb_size = cq_pair->x2i_q.buf_size;

        i2x.mb_head_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->i2x_q.head_addr);
        i2x.mb_tail_ptr_reg = AIE2_MBOX_OFF(ndev, cq_pair->i2x_q.tail_addr);
        i2x.rb_start_addr = AIE2_SRAM_OFF(ndev, cq_pair->i2x_q.buf_addr);
        i2x.rb_size = cq_pair->i2x_q.buf_size;

        ret = pci_irq_vector(to_pci_dev(xdna->ddev.dev), resp.msix_id);
        if (ret == -EINVAL) {
                XDNA_ERR(xdna, "not able to create channel");
                goto out_destroy_context;
        }

        intr_reg = i2x.mb_head_ptr_reg + 4;
        hwctx->priv->mbox_chann = xdna_mailbox_create_channel(ndev->mbox, &x2i, &i2x,
                                                              intr_reg, ret);
        if (!hwctx->priv->mbox_chann) {
                XDNA_ERR(xdna, "not able to create channel");
                ret = -EINVAL;
                goto out_destroy_context;
        }

        XDNA_DBG(xdna, "%s mailbox channel irq: %d, msix_id: %d",
                 hwctx->name, ret, resp.msix_id);
        XDNA_DBG(xdna, "%s created fw ctx %d pasid %d", hwctx->name,
                 hwctx->fw_ctx_id, hwctx->client->pasid);

        return 0;

out_destroy_context:
        aie2_destroy_context(ndev, hwctx);
        return ret;
}

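/*
 * Tear down a firmware context. The mailbox channel is stopped before the
 * destroy message is sent and freed afterwards; a firmware-side failure
 * is logged but local cleanup still proceeds.
 */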
int aie2_destroy_context(struct amdxdna_dev_hdl *ndev, struct amdxdna_hwctx *hwctx)
{
        DECLARE_AIE2_MSG(destroy_ctx, MSG_OP_DESTROY_CONTEXT);
        struct amdxdna_dev *xdna = ndev->xdna;
        int ret;

        if (hwctx->fw_ctx_id == -1)
                return 0;

        xdna_mailbox_stop_channel(hwctx->priv->mbox_chann);

        req.context_id = hwctx->fw_ctx_id;
        ret = aie2_send_mgmt_msg_wait(ndev, &msg);
        if (ret)
                XDNA_WARN(xdna, "%s destroy context failed, ret %d", hwctx->name, ret);

        xdna_mailbox_destroy_channel(hwctx->priv->mbox_chann);
        XDNA_DBG(xdna, "%s destroyed fw ctx %d", hwctx->name,
                 hwctx->fw_ctx_id);
        hwctx->priv->mbox_chann = NULL;
        hwctx->fw_ctx_id = -1;

        return ret;
}

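/*
 * Tell firmware where the host buffer for the given firmware context
 * lives (device-accessible address and size).
 */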
int aie2_map_host_buf(struct amdxdna_dev_hdl *ndev, u32 context_id, u64 addr, u64 size)
{
        DECLARE_AIE2_MSG(map_host_buffer, MSG_OP_MAP_HOST_BUFFER);
        struct amdxdna_dev *xdna = ndev->xdna;
        int ret;

        req.context_id = context_id;
        req.buf_addr = addr;
        req.buf_size = size;
        ret = aie2_send_mgmt_msg_wait(ndev, &msg);
        if (ret)
                return ret;

        XDNA_DBG(xdna, "fw ctx %d map host buf addr 0x%llx size 0x%llx",
                 context_id, addr, size);

        return 0;
}

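/* amdxdna_hwctx_walk() callback: OR each context's column range into a bitmap. */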
static int amdxdna_hwctx_col_map(struct amdxdna_hwctx *hwctx, void *arg)
{
        u32 *bitmap = arg;

        *bitmap |= GENMASK(hwctx->start_col + hwctx->num_col - 1, hwctx->start_col);

        return 0;
}

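/*
 * Query per-column NPU status into a user buffer. A DMA buffer is
 * allocated for firmware to fill, the active columns are collected from
 * every client's hardware contexts, and on success the dump is copied to
 * user space with *cols_filled set to the column bitmap that was queried.
 */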
int aie2_query_status(struct amdxdna_dev_hdl *ndev, char __user *buf,
                      u32 size, u32 *cols_filled)
{
        DECLARE_AIE2_MSG(aie_column_info, MSG_OP_QUERY_COL_STATUS);
        struct amdxdna_dev *xdna = ndev->xdna;
        struct amdxdna_client *client;
        dma_addr_t dma_addr;
        u32 aie_bitmap = 0;
        u8 *buff_addr;
        int ret;

        buff_addr = dma_alloc_noncoherent(xdna->ddev.dev, size, &dma_addr,
                                          DMA_FROM_DEVICE, GFP_KERNEL);
        if (!buff_addr)
                return -ENOMEM;

        /* Go through each hardware context and mark the AIE columns that are active */
        list_for_each_entry(client, &xdna->client_list, node)
                amdxdna_hwctx_walk(client, &aie_bitmap, amdxdna_hwctx_col_map);

        *cols_filled = 0;
        req.dump_buff_addr = dma_addr;
        req.dump_buff_size = size;
        req.num_cols = hweight32(aie_bitmap);
        req.aie_bitmap = aie_bitmap;

        drm_clflush_virt_range(buff_addr, size); /* device can access */
        ret = aie2_send_mgmt_msg_wait(ndev, &msg);
        if (ret) {
                XDNA_ERR(xdna, "Error during NPU query, status %d", ret);
                goto fail;
        }

        if (resp.status != AIE2_STATUS_SUCCESS) {
                XDNA_ERR(xdna, "Query NPU status failed, status 0x%x", resp.status);
                ret = -EINVAL;
                goto fail;
        }
        XDNA_DBG(xdna, "Query NPU status completed");

        if (size < resp.size) {
                ret = -EINVAL;
                XDNA_ERR(xdna, "Bad buffer size. Available: %u. Needs: %u", size, resp.size);
                goto fail;
        }

        if (copy_to_user(buf, buff_addr, resp.size)) {
                ret = -EFAULT;
                XDNA_ERR(xdna, "Failed to copy NPU status to user space");
                goto fail;
        }

        *cols_filled = aie_bitmap;

fail:
        dma_free_noncoherent(xdna->ddev.dev, size, buff_addr, dma_addr, DMA_FROM_DEVICE);
        return ret;
}

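/*
 * Register a buffer for firmware to deliver asynchronous event messages
 * into. Unlike the synchronous helpers above, this only posts the request
 * on the management channel; the caller's notify callback runs when an
 * event arrives.
 */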
int aie2_register_asyn_event_msg(struct amdxdna_dev_hdl *ndev, dma_addr_t addr, u32 size,
                                 void *handle, int (*cb)(void*, void __iomem *, size_t))
{
        struct async_event_msg_req req = { 0 };
        struct xdna_mailbox_msg msg = {
                .send_data = (u8 *)&req,
                .send_size = sizeof(req),
                .handle = handle,
                .opcode = MSG_OP_REGISTER_ASYNC_EVENT_MSG,
                .notify_cb = cb,
        };

        req.buf_addr = addr;
        req.buf_size = size;

        XDNA_DBG(ndev->xdna, "Register addr 0x%llx size 0x%x", addr, size);
        return xdna_mailbox_send_msg(ndev->mgmt_chann, &msg, TX_TIMEOUT);
}

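/*
 * Configure the compute units (CUs) of a hardware context. Each config
 * word packs the PDI address of a device-memory BO (right-shifted by the
 * device memory buffer shift) together with the CU function. A timeout on
 * the send tears down the firmware context.
 */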
int aie2_config_cu(struct amdxdna_hwctx *hwctx)
{
        struct mailbox_channel *chann = hwctx->priv->mbox_chann;
        struct amdxdna_dev *xdna = hwctx->client->xdna;
        u32 shift = xdna->dev_info->dev_mem_buf_shift;
        DECLARE_AIE2_MSG(config_cu, MSG_OP_CONFIG_CU);
        struct drm_gem_object *gobj;
        struct amdxdna_gem_obj *abo;
        int ret, i;

        if (!chann)
                return -ENODEV;

        if (hwctx->cus->num_cus > MAX_NUM_CUS) {
                XDNA_DBG(xdna, "Exceed maximum CU %d", MAX_NUM_CUS);
                return -EINVAL;
        }

        for (i = 0; i < hwctx->cus->num_cus; i++) {
                struct amdxdna_cu_config *cu = &hwctx->cus->cu_configs[i];

                if (XDNA_MBZ_DBG(xdna, cu->pad, sizeof(cu->pad)))
                        return -EINVAL;

                gobj = drm_gem_object_lookup(hwctx->client->filp, cu->cu_bo);
                if (!gobj) {
                        XDNA_ERR(xdna, "Lookup GEM object failed");
                        return -EINVAL;
                }
                abo = to_xdna_obj(gobj);

                if (abo->type != AMDXDNA_BO_DEV) {
                        drm_gem_object_put(gobj);
                        XDNA_ERR(xdna, "Invalid BO type");
                        return -EINVAL;
                }

                req.cfgs[i] = FIELD_PREP(AIE2_MSG_CFG_CU_PDI_ADDR,
                                         abo->mem.dev_addr >> shift);
                req.cfgs[i] |= FIELD_PREP(AIE2_MSG_CFG_CU_FUNC, cu->cu_func);
                XDNA_DBG(xdna, "CU %d full addr 0x%llx, cfg 0x%x", i,
                         abo->mem.dev_addr, req.cfgs[i]);
                drm_gem_object_put(gobj);
        }
        req.num_cus = hwctx->cus->num_cus;

        ret = xdna_send_msg_wait(xdna, chann, &msg);
        if (ret == -ETIME)
                aie2_destroy_context(xdna->dev_handle, hwctx);

        if (resp.status == AIE2_STATUS_SUCCESS) {
                XDNA_DBG(xdna, "Configure %d CUs, ret %d", req.num_cus, ret);
                return 0;
        }

        XDNA_ERR(xdna, "Command opcode 0x%x failed, status 0x%x ret %d",
                 msg.opcode, resp.status, ret);
        return ret;
}

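/*
 * Submit one command for execution. The mailbox request is built from the
 * command BO payload: ERT_START_CU maps to an execute_buffer_req and
 * ERT_START_NPU to an exec_dpu_req. The send is asynchronous; notify_cb
 * runs on completion.
 */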
int aie2_execbuf(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
                 int (*notify_cb)(void *, void __iomem *, size_t))
{
        struct mailbox_channel *chann = hwctx->priv->mbox_chann;
        struct amdxdna_dev *xdna = hwctx->client->xdna;
        struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
        union {
                struct execute_buffer_req ebuf;
                struct exec_dpu_req dpu;
        } req;
        struct xdna_mailbox_msg msg;
        u32 payload_len;
        void *payload;
        int cu_idx;
        int ret;
        u32 op;

        if (!chann)
                return -ENODEV;

        payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
        if (!payload) {
                XDNA_ERR(xdna, "Invalid command, cannot get payload");
                return -EINVAL;
        }

        cu_idx = amdxdna_cmd_get_cu_idx(cmd_abo);
        if (cu_idx < 0) {
                XDNA_DBG(xdna, "Invalid cu idx");
                return -EINVAL;
        }

        op = amdxdna_cmd_get_op(cmd_abo);
        switch (op) {
        case ERT_START_CU:
                if (unlikely(payload_len > sizeof(req.ebuf.payload)))
                        XDNA_DBG(xdna, "Invalid ebuf payload len: %d", payload_len);
                req.ebuf.cu_idx = cu_idx;
                memcpy(req.ebuf.payload, payload, sizeof(req.ebuf.payload));
                msg.send_size = sizeof(req.ebuf);
                msg.opcode = MSG_OP_EXECUTE_BUFFER_CF;
                break;
        case ERT_START_NPU: {
                struct amdxdna_cmd_start_npu *sn = payload;

                if (unlikely(payload_len - sizeof(*sn) > sizeof(req.dpu.payload)))
                        XDNA_DBG(xdna, "Invalid dpu payload len: %d", payload_len);
                req.dpu.inst_buf_addr = sn->buffer;
                req.dpu.inst_size = sn->buffer_size;
                req.dpu.inst_prop_cnt = sn->prop_count;
                req.dpu.cu_idx = cu_idx;
                memcpy(req.dpu.payload, sn->prop_args, sizeof(req.dpu.payload));
                msg.send_size = sizeof(req.dpu);
                msg.opcode = MSG_OP_EXEC_DPU;
                break;
        }
        default:
                XDNA_DBG(xdna, "Invalid ERT cmd op code: %d", op);
                return -EINVAL;
        }
        msg.handle = job;
        msg.notify_cb = notify_cb;
        msg.send_data = (u8 *)&req;
        print_hex_dump_debug("cmd: ", DUMP_PREFIX_OFFSET, 16, 4, &req,
                             0x40, false);

        ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
        if (ret) {
                XDNA_ERR(xdna, "Send message failed");
                return ret;
        }

        return 0;
}

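/*
 * Fill one execbuf (CF) slot of the command-chain buffer at the given
 * offset, returning the exact slot size through *size so firmware copies
 * no more than needed.
 */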
static int
aie2_cmdlist_fill_one_slot_cf(void *cmd_buf, u32 offset,
                              struct amdxdna_gem_obj *abo, u32 *size)
{
        struct cmd_chain_slot_execbuf_cf *buf = cmd_buf + offset;
        int cu_idx = amdxdna_cmd_get_cu_idx(abo);
        u32 payload_len;
        void *payload;

        if (cu_idx < 0)
                return -EINVAL;

        payload = amdxdna_cmd_get_payload(abo, &payload_len);
        if (!payload)
                return -EINVAL;

        if (!slot_has_space(*buf, offset, payload_len))
                return -ENOSPC;

        buf->cu_idx = cu_idx;
        buf->arg_cnt = payload_len / sizeof(u32);
        memcpy(buf->args, payload, payload_len);
        /* Report the exact slot size so firmware copies only what is needed */
        *size = sizeof(*buf) + payload_len;
        return 0;
}

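/*
 * Fill one DPU slot of the command-chain buffer. The payload is an
 * amdxdna_cmd_start_npu header followed by property arguments; both the
 * header fields and the argument bytes are copied into the slot.
 */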
static int
aie2_cmdlist_fill_one_slot_dpu(void *cmd_buf, u32 offset,
                               struct amdxdna_gem_obj *abo, u32 *size)
{
        struct cmd_chain_slot_dpu *buf = cmd_buf + offset;
        int cu_idx = amdxdna_cmd_get_cu_idx(abo);
        struct amdxdna_cmd_start_npu *sn;
        u32 payload_len;
        void *payload;
        u32 arg_sz;

        if (cu_idx < 0)
                return -EINVAL;

        payload = amdxdna_cmd_get_payload(abo, &payload_len);
        if (!payload)
                return -EINVAL;
        sn = payload;
        arg_sz = payload_len - sizeof(*sn);
        if (payload_len < sizeof(*sn) || arg_sz > MAX_DPU_ARGS_SIZE)
                return -EINVAL;

        if (!slot_has_space(*buf, offset, arg_sz))
                return -ENOSPC;

        buf->inst_buf_addr = sn->buffer;
        buf->inst_size = sn->buffer_size;
        buf->inst_prop_cnt = sn->prop_count;
        buf->cu_idx = cu_idx;
        buf->arg_cnt = arg_sz / sizeof(u32);
        memcpy(buf->args, sn->prop_args, arg_sz);

        /* Report the exact slot size so firmware copies only what is needed */
        *size = sizeof(*buf) + arg_sz;
        return 0;
}

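/*
 * Dispatch to the right slot filler for one sub-command. The sub-command
 * op must match the chain's op; mismatches are rejected with -EINVAL.
 */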
static int
aie2_cmdlist_fill_one_slot(u32 op, struct amdxdna_gem_obj *cmdbuf_abo, u32 offset,
                           struct amdxdna_gem_obj *abo, u32 *size)
{
        u32 this_op = amdxdna_cmd_get_op(abo);
        void *cmd_buf = cmdbuf_abo->mem.kva;
        int ret;

        if (this_op != op) {
                ret = -EINVAL;
                goto done;
        }

        switch (op) {
        case ERT_START_CU:
                ret = aie2_cmdlist_fill_one_slot_cf(cmd_buf, offset, abo, size);
                break;
        case ERT_START_NPU:
                ret = aie2_cmdlist_fill_one_slot_dpu(cmd_buf, offset, abo, size);
                break;
        default:
                ret = -EOPNOTSUPP;
        }

done:
        if (ret) {
                XDNA_ERR(abo->client->xdna, "Can't fill slot for cmd op %d ret %d",
                         op, ret);
        }
        return ret;
}

static inline struct amdxdna_gem_obj *
aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job)
{
        int idx = get_job_idx(job->seq);

        return job->hwctx->priv->cmd_buf[idx];
}

static void
aie2_cmdlist_prepare_request(struct cmd_chain_req *req,
                             struct amdxdna_gem_obj *cmdbuf_abo, u32 size, u32 cnt)
{
        req->buf_addr = cmdbuf_abo->mem.dev_addr;
        req->buf_size = size;
        req->count = cnt;
        drm_clflush_virt_range(cmdbuf_abo->mem.kva, size);
        XDNA_DBG(cmdbuf_abo->client->xdna, "Command buf addr 0x%llx size 0x%x count %d",
                 req->buf_addr, size, cnt);
}

static inline u32
aie2_cmd_op_to_msg_op(u32 op)
{
        switch (op) {
        case ERT_START_CU:
                return MSG_OP_CHAIN_EXEC_BUFFER_CF;
        case ERT_START_NPU:
                return MSG_OP_CHAIN_EXEC_DPU;
        default:
                return MSG_OP_MAX_OPCODE;
        }
}

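/*
 * Submit an ERT_CMD_CHAIN command BO: every sub-command BO is validated
 * and packed into one contiguous command buffer, then the whole chain is
 * sent as a single mailbox message. All sub-commands must share the op of
 * the first one.
 */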
int aie2_cmdlist_multi_execbuf(struct amdxdna_hwctx *hwctx,
                               struct amdxdna_sched_job *job,
                               int (*notify_cb)(void *, void __iomem *, size_t))
{
        struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
        struct mailbox_channel *chann = hwctx->priv->mbox_chann;
        struct amdxdna_client *client = hwctx->client;
        struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
        struct amdxdna_cmd_chain *payload;
        struct xdna_mailbox_msg msg;
        struct cmd_chain_req req;
        u32 payload_len;
        u32 offset = 0;
        u32 size;
        int ret;
        u32 op;
        u32 i;

        op = amdxdna_cmd_get_op(cmd_abo);
        payload = amdxdna_cmd_get_payload(cmd_abo, &payload_len);
        if (op != ERT_CMD_CHAIN || !payload ||
            payload_len < struct_size(payload, data, payload->command_count))
                return -EINVAL;

        for (i = 0; i < payload->command_count; i++) {
                u32 boh = (u32)(payload->data[i]);
                struct amdxdna_gem_obj *abo;

                abo = amdxdna_gem_get_obj(client, boh, AMDXDNA_BO_CMD);
                if (!abo) {
                        XDNA_ERR(client->xdna, "Failed to find cmd BO %d", boh);
                        return -ENOENT;
                }

                /* All sub-commands must have the same op; use the first one's. */
                if (i == 0)
                        op = amdxdna_cmd_get_op(abo);

                ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, offset, abo, &size);
                amdxdna_gem_put_obj(abo);
                if (ret)
                        return -EINVAL;

                offset += size;
        }

        /* The offset is the accumulated total size of the cmd buffer */
        aie2_cmdlist_prepare_request(&req, cmdbuf_abo, offset, payload->command_count);

        msg.opcode = aie2_cmd_op_to_msg_op(op);
        if (msg.opcode == MSG_OP_MAX_OPCODE)
                return -EOPNOTSUPP;
        msg.handle = job;
        msg.notify_cb = notify_cb;
        msg.send_data = (u8 *)&req;
        msg.send_size = sizeof(req);
        ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
        if (ret) {
                XDNA_ERR(hwctx->client->xdna, "Send message failed");
                return ret;
        }

        return 0;
}

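/*
 * Submit a single command through the same command-chain path: one slot
 * is filled and a chain request with count 1 is sent.
 */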
int aie2_cmdlist_single_execbuf(struct amdxdna_hwctx *hwctx,
                                struct amdxdna_sched_job *job,
                                int (*notify_cb)(void *, void __iomem *, size_t))
{
        struct amdxdna_gem_obj *cmdbuf_abo = aie2_cmdlist_get_cmd_buf(job);
        struct mailbox_channel *chann = hwctx->priv->mbox_chann;
        struct amdxdna_gem_obj *cmd_abo = job->cmd_bo;
        struct xdna_mailbox_msg msg;
        struct cmd_chain_req req;
        u32 size;
        int ret;
        u32 op;

        op = amdxdna_cmd_get_op(cmd_abo);
        ret = aie2_cmdlist_fill_one_slot(op, cmdbuf_abo, 0, cmd_abo, &size);
        if (ret)
                return ret;

        aie2_cmdlist_prepare_request(&req, cmdbuf_abo, size, 1);

        msg.opcode = aie2_cmd_op_to_msg_op(op);
        if (msg.opcode == MSG_OP_MAX_OPCODE)
                return -EOPNOTSUPP;
        msg.handle = job;
        msg.notify_cb = notify_cb;
        msg.send_data = (u8 *)&req;
        msg.send_size = sizeof(req);
        ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
        if (ret) {
                XDNA_ERR(hwctx->client->xdna, "Send message failed");
                return ret;
        }

        return 0;
}

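/*
 * Ask firmware to sync a BO from device memory back to host memory. The
 * destination address is expressed as the BO's offset within the client's
 * device heap.
 */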
int aie2_sync_bo(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
                 int (*notify_cb)(void *, void __iomem *, size_t))
{
        struct mailbox_channel *chann = hwctx->priv->mbox_chann;
        struct amdxdna_gem_obj *abo = to_xdna_obj(job->bos[0]);
        struct amdxdna_dev *xdna = hwctx->client->xdna;
        struct xdna_mailbox_msg msg;
        struct sync_bo_req req;
        int ret = 0;

        req.src_addr = 0;
        req.dst_addr = abo->mem.dev_addr - hwctx->client->dev_heap->mem.dev_addr;
        req.size = abo->mem.size;

        /* Device to Host */
        req.type = FIELD_PREP(AIE2_MSG_SYNC_BO_SRC_TYPE, SYNC_BO_DEV_MEM) |
                FIELD_PREP(AIE2_MSG_SYNC_BO_DST_TYPE, SYNC_BO_HOST_MEM);

        XDNA_DBG(xdna, "sync %d bytes src(0x%llx) to dst(0x%llx) completed",
                 req.size, req.src_addr, req.dst_addr);

        msg.handle = job;
        msg.notify_cb = notify_cb;
        msg.send_data = (u8 *)&req;
        msg.send_size = sizeof(req);
        msg.opcode = MSG_OP_SYNC_BO;

        ret = xdna_mailbox_send_msg(chann, &msg, TX_TIMEOUT);
        if (ret) {
                XDNA_ERR(xdna, "Send message failed");
                return ret;
        }

        return 0;
}