// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */

#include <linux/units.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_btrs_lnl_reg.h"
#include "ivpu_hw_btrs_mtl_reg.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_pm.h"

#define BTRS_MTL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR)))

#define BTRS_LNL_IRQ_MASK ((REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR)) | \
			   (REG_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR)))

#define BTRS_MTL_ALL_IRQ_MASK (BTRS_MTL_IRQ_MASK | (REG_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, \
							     FREQ_CHANGE)))

#define BTRS_IRQ_DISABLE_MASK ((u32)-1)

#define BTRS_LNL_ALL_IRQ_MASK ((u32)-1)


#define PLL_CDYN_DEFAULT 0x80
#define PLL_EPP_DEFAULT 0x80
#define PLL_REF_CLK_FREQ 50000000ull
#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)

#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
#define IDLE_TIMEOUT_US (5 * USEC_PER_MSEC)
#define TIMEOUT_US (150 * USEC_PER_MSEC)

/* Work point configuration values */
#define WP_CONFIG(tile, ratio) (((tile) << 8) | (ratio))
#define MTL_CONFIG_1_TILE 0x01
#define MTL_CONFIG_2_TILE 0x02
#define MTL_PLL_RATIO_5_3 0x01
#define MTL_PLL_RATIO_4_3 0x02
#define BTRS_MTL_TILE_FUSE_ENABLE_BOTH 0x0
#define BTRS_MTL_TILE_SKU_BOTH 0x3630

#define BTRS_LNL_TILE_MAX_NUM 6
#define BTRS_LNL_TILE_MAX_MASK 0x3f

#define WEIGHTS_DEFAULT 0xf711f711u
#define WEIGHTS_ATS_DEFAULT 0x0000f711u

#define DCT_REQ 0x2
#define DCT_ENABLE 0x1
#define DCT_DISABLE 0x0

static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio);

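/*
 * Detect whether this MTL buttress needs the "clear interrupts with 0"
 * behavior: write all 1s to INTERRUPT_STAT and, if the register still reads
 * back as all 1s, clear it by writing 0 and return true so the
 * interrupt_clear_with_0 workaround can be applied.
 */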
int ivpu_hw_btrs_irqs_clear_with_0_mtl(struct ivpu_device *vdev)
{
	REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, BTRS_MTL_ALL_IRQ_MASK);
	if (REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) == BTRS_MTL_ALL_IRQ_MASK) {
		/* Writing 1s does not clear the interrupt status register */
		REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
		return true;
	}

	return false;
}

static void freq_ratios_init_mtl(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u32 fmin_fuse, fmax_fuse;

	fmin_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMIN_FUSE);
	hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
	hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMIN_FUSE, PN_RATIO, fmin_fuse);

	fmax_fuse = REGB_RD32(VPU_HW_BTRS_MTL_FMAX_FUSE);
	hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_MTL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
}

static void freq_ratios_init_lnl(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u32 fmin_fuse, fmax_fuse;

	fmin_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMIN_FUSE);
	hw->pll.min_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, MIN_RATIO, fmin_fuse);
	hw->pll.pn_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMIN_FUSE, PN_RATIO, fmin_fuse);

	fmax_fuse = REGB_RD32(VPU_HW_BTRS_LNL_FMAX_FUSE);
	hw->pll.max_ratio = REG_GET_FLD(VPU_HW_BTRS_LNL_FMAX_FUSE, MAX_RATIO, fmax_fuse);
}

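/*
 * Read the PLL min/max/PN ratio fuses for the detected buttress generation
 * and clamp the results against the ivpu_pll_min_ratio/ivpu_pll_max_ratio
 * module parameters.
 */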
void ivpu_hw_btrs_freq_ratios_init(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		freq_ratios_init_mtl(vdev);
	else
		freq_ratios_init_lnl(vdev);

	hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
	hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
	hw->pll.pn_ratio = clamp_t(u8, hw->pll.pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
}

static bool tile_disable_check(u32 config)
{
	/* Allowed values: 0 or one bit from range 0-5 (6 tiles) */
	if (config == 0)
		return true;

	if (config > BIT(BTRS_LNL_TILE_MAX_NUM - 1))
		return false;

	if ((config & (config - 1)) == 0)
		return true;

	return false;
}

static int read_tile_config_fuse(struct ivpu_device *vdev, u32 *tile_fuse_config)
{
	u32 fuse;
	u32 config;

	fuse = REGB_RD32(VPU_HW_BTRS_LNL_TILE_FUSE);
	if (!REG_TEST_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, VALID, fuse)) {
		ivpu_err(vdev, "Fuse: invalid (0x%x)\n", fuse);
		return -EIO;
	}

	config = REG_GET_FLD(VPU_HW_BTRS_LNL_TILE_FUSE, CONFIG, fuse);
	if (!tile_disable_check(config))
		ivpu_warn(vdev, "More than 1 tile disabled, tile fuse config mask: 0x%x\n", config);

	ivpu_dbg(vdev, MISC, "Tile disable config mask: 0x%x\n", config);

	*tile_fuse_config = config;
	return 0;
}

static int info_init_mtl(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;

	hw->tile_fuse = BTRS_MTL_TILE_FUSE_ENABLE_BOTH;
	hw->sku = BTRS_MTL_TILE_SKU_BOTH;
	hw->config = WP_CONFIG(MTL_CONFIG_2_TILE, MTL_PLL_RATIO_4_3);

	return 0;
}

static int info_init_lnl(struct ivpu_device *vdev)
{
	struct ivpu_hw_info *hw = vdev->hw;
	u32 tile_fuse_config;
	int ret;

	ret = read_tile_config_fuse(vdev, &tile_fuse_config);
	if (ret)
		return ret;

	hw->tile_fuse = tile_fuse_config;
	hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;

	return 0;
}

int ivpu_hw_btrs_info_init(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return info_init_mtl(vdev);
	else
		return info_init_lnl(vdev);
}

static int wp_request_sync(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_POLL_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
	else
		return REGB_POLL_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
}

static int wait_for_status_ready(struct ivpu_device *vdev, bool enable)
{
	u32 exp_val = enable ? 0x1 : 0x0;

	if (IVPU_WA(punit_disabled))
		return 0;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
	else
		return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, exp_val, PLL_TIMEOUT_US);
}

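/*
 * PLL workpoint request payload: min/max/target PLL ratios plus the config,
 * EPP and CDYN values written to the WP_REQ_PAYLOAD registers.
 */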
struct wp_request {
	u16 min;
	u16 max;
	u16 target;
	u16 cfg;
	u16 epp;
	u16 cdyn;
};

static void wp_request_mtl(struct ivpu_device *vdev, struct wp_request *wp)
{
	u32 val;

	val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
	REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD0, val);

	val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, EPP, PLL_EPP_DEFAULT, val);
	REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD1, val);

	val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
	REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_PAYLOAD2, val);

	val = REGB_RD32(VPU_HW_BTRS_MTL_WP_REQ_CMD);
	val = REG_SET_FLD(VPU_HW_BTRS_MTL_WP_REQ_CMD, SEND, val);
	REGB_WR32(VPU_HW_BTRS_MTL_WP_REQ_CMD, val);
}

static void wp_request_lnl(struct ivpu_device *vdev, struct wp_request *wp)
{
	u32 val;

	val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MIN_RATIO, wp->min, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, MAX_RATIO, wp->max, val);
	REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD0, val);

	val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, TARGET_RATIO, wp->target, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, EPP, wp->epp, val);
	REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD1, val);

	val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CONFIG, wp->cfg, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, CDYN, wp->cdyn, val);
	REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_PAYLOAD2, val);

	val = REGB_RD32(VPU_HW_BTRS_LNL_WP_REQ_CMD);
	val = REG_SET_FLD(VPU_HW_BTRS_LNL_WP_REQ_CMD, SEND, val);
	REGB_WR32(VPU_HW_BTRS_LNL_WP_REQ_CMD, val);
}

static void wp_request(struct ivpu_device *vdev, struct wp_request *wp)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		wp_request_mtl(vdev, wp);
	else
		wp_request_lnl(vdev, wp);
}

static int wp_request_send(struct ivpu_device *vdev, struct wp_request *wp)
{
	int ret;

	ret = wp_request_sync(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to sync before workpoint request: %d\n", ret);
		return ret;
	}

	wp_request(vdev, wp);

	ret = wp_request_sync(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to sync after workpoint request: %d\n", ret);

	return ret;
}

static void prepare_wp_request(struct ivpu_device *vdev, struct wp_request *wp, bool enable)
{
	struct ivpu_hw_info *hw = vdev->hw;

	wp->min = hw->pll.min_ratio;
	wp->max = hw->pll.max_ratio;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
		wp->target = enable ? hw->pll.pn_ratio : 0;
		wp->cfg = enable ? hw->config : 0;
		wp->cdyn = 0;
		wp->epp = 0;
	} else {
		wp->target = hw->pll.pn_ratio;
		wp->cfg = 0;
		wp->cdyn = enable ? PLL_CDYN_DEFAULT : 0;
		wp->epp = enable ? PLL_EPP_DEFAULT : 0;
	}
}

static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable)
{
	u32 exp_val = enable ? 0x1 : 0x0;

	if (ivpu_hw_btrs_gen(vdev) != IVPU_HW_BTRS_MTL)
		return 0;

	if (IVPU_WA(punit_disabled))
		return 0;

	return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
}

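/*
 * Send a PLL workpoint request to the punit and wait for the PLL to lock
 * (MTL only) and for the VPU_STATUS READY bit to reach the requested state.
 * Skipped entirely when the punit_disabled workaround is active.
 */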
int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable)
{
	struct wp_request wp;
	int ret;

	if (IVPU_WA(punit_disabled)) {
		ivpu_dbg(vdev, PM, "Skipping workpoint request\n");
		return 0;
	}

	prepare_wp_request(vdev, &wp, enable);

	ivpu_dbg(vdev, PM, "PLL workpoint request: %lu MHz, config: 0x%x, epp: 0x%x, cdyn: 0x%x\n",
		 pll_ratio_to_dpu_freq(vdev, wp.target) / HZ_PER_MHZ, wp.cfg, wp.epp, wp.cdyn);

	ret = wp_request_send(vdev, &wp);
	if (ret) {
		ivpu_err(vdev, "Failed to send workpoint request: %d\n", ret);
		return ret;
	}

	ret = wait_for_pll_lock(vdev, enable);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for PLL lock\n");
		return ret;
	}

	ret = wait_for_status_ready(vdev, enable);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for NPU ready status\n");
		return ret;
	}

	return 0;
}

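/*
 * D0i3 entry/exit sequence: wait for any pending transition (INPROGRESS) to
 * finish, set or clear the I3 bit in D0I3_CONTROL, then wait for the new
 * transition to complete.
 */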
static int d0i3_drive_mtl(struct ivpu_device *vdev, bool enable)
{
	int ret;
	u32 val;

	ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
		return ret;
	}

	val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL);
	if (enable)
		val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
	else
		val = REG_CLR_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, I3, val);
	REGB_WR32(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, val);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret)
		ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);

	return ret;
}

static int d0i3_drive_lnl(struct ivpu_device *vdev, bool enable)
{
	int ret;
	u32 val;

	ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
		return ret;
	}

	val = REGB_RD32(VPU_HW_BTRS_LNL_D0I3_CONTROL);
	if (enable)
		val = REG_SET_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
	else
		val = REG_CLR_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, I3, val);
	REGB_WR32(VPU_HW_BTRS_LNL_D0I3_CONTROL, val);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
		return ret;
	}

	return 0;
}

static int d0i3_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return d0i3_drive_mtl(vdev, enable);
	else
		return d0i3_drive_lnl(vdev, enable);
}

int ivpu_hw_btrs_d0i3_enable(struct ivpu_device *vdev)
{
	int ret;

	if (IVPU_WA(punit_disabled))
		return 0;

	ret = d0i3_drive(vdev, true);
	if (ret)
		ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);

	udelay(5); /* VPU requires 5 us to complete the transition */

	return ret;
}

int ivpu_hw_btrs_d0i3_disable(struct ivpu_device *vdev)
{
	int ret;

	if (IVPU_WA(punit_disabled))
		return 0;

	ret = d0i3_drive(vdev, false);
	if (ret)
		ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);

	return ret;
}

int ivpu_hw_btrs_wait_for_clock_res_own_ack(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return 0;

	return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, CLOCK_RESOURCE_OWN_ACK, 1, TIMEOUT_US);
}

void ivpu_hw_btrs_set_port_arbitration_weights_lnl(struct ivpu_device *vdev)
{
	REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS, WEIGHTS_DEFAULT);
	REGB_WR32(VPU_HW_BTRS_LNL_PORT_ARBITRATION_WEIGHTS_ATS, WEIGHTS_ATS_DEFAULT);
}

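/*
 * IP reset sequence: wait for a previous reset to finish (TRIGGER == 0),
 * set TRIGGER to start a new reset, then poll until the hardware clears it
 * again.
 */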
static int ip_reset_mtl(struct ivpu_device *vdev)
{
	int ret;
	u32 val;

	ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
		return ret;
	}

	val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_IP_RESET);
	val = REG_SET_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, val);
	REGB_WR32(VPU_HW_BTRS_MTL_VPU_IP_RESET, val);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret)
		ivpu_err(vdev, "Timed out waiting for RESET completion\n");

	return ret;
}

static int ip_reset_lnl(struct ivpu_device *vdev)
{
	int ret;
	u32 val;

	ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret) {
		ivpu_err(vdev, "Wait for *_TRIGGER timed out\n");
		return ret;
	}

	val = REGB_RD32(VPU_HW_BTRS_LNL_IP_RESET);
	val = REG_SET_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, val);
	REGB_WR32(VPU_HW_BTRS_LNL_IP_RESET, val);

	ret = REGB_POLL_FLD(VPU_HW_BTRS_LNL_IP_RESET, TRIGGER, 0, TIMEOUT_US);
	if (ret)
		ivpu_err(vdev, "Timed out waiting for RESET completion\n");

	return ret;
}

int ivpu_hw_btrs_ip_reset(struct ivpu_device *vdev)
{
	if (IVPU_WA(punit_disabled))
		return 0;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return ip_reset_mtl(vdev);
	else
		return ip_reset_lnl(vdev);
}

void ivpu_hw_btrs_profiling_freq_reg_set_lnl(struct ivpu_device *vdev)
{
	u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

	if (vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_DEFAULT)
		val = REG_CLR_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);
	else
		val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PERF_CLK, val);

	REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
}

void ivpu_hw_btrs_ats_print_lnl(struct ivpu_device *vdev)
{
	ivpu_dbg(vdev, MISC, "Buttress ATS: %s\n",
		 REGB_RD32(VPU_HW_BTRS_LNL_HM_ATS) ? "Enable" : "Disable");
}

void ivpu_hw_btrs_clock_relinquish_disable_lnl(struct ivpu_device *vdev)
{
	u32 val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

	val = REG_SET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, DISABLE_CLK_RELINQUISH, val);
	REGB_WR32(VPU_HW_BTRS_LNL_VPU_STATUS, val);
}

bool ivpu_hw_btrs_is_idle(struct ivpu_device *vdev)
{
	u32 val;

	if (IVPU_WA(punit_disabled))
		return true;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
		val = REGB_RD32(VPU_HW_BTRS_MTL_VPU_STATUS);

		return REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, READY, val) &&
		       REG_TEST_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, val);
	} else {
		val = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

		return REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, READY, val) &&
		       REG_TEST_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, val);
	}
}

int ivpu_hw_btrs_wait_for_idle(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_POLL_FLD(VPU_HW_BTRS_MTL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
	else
		return REGB_POLL_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, IDLE, 0x1, IDLE_TIMEOUT_US);
}

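/*
 * DPU frequency helpers: the PLL output is ratio * PLL_REF_CLK_FREQ (50 MHz),
 * and the DPU clock is derived from it as 2/3 of the PLL frequency on MTL and
 * 1/2 on LNL.
 */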
static u32 pll_config_get_mtl(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_HW_BTRS_MTL_CURRENT_PLL);
}

static u32 pll_config_get_lnl(struct ivpu_device *vdev)
{
	return REGB_RD32(VPU_HW_BTRS_LNL_PLL_FREQ);
}

static u32 pll_ratio_to_dpu_freq_mtl(u16 ratio)
{
	return (PLL_RATIO_TO_FREQ(ratio) * 2) / 3;
}

static u32 pll_ratio_to_dpu_freq_lnl(u16 ratio)
{
	return PLL_RATIO_TO_FREQ(ratio) / 2;
}

static u32 pll_ratio_to_dpu_freq(struct ivpu_device *vdev, u32 ratio)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return pll_ratio_to_dpu_freq_mtl(ratio);
	else
		return pll_ratio_to_dpu_freq_lnl(ratio);
}

u32 ivpu_hw_btrs_dpu_max_freq_get(struct ivpu_device *vdev)
{
	return pll_ratio_to_dpu_freq(vdev, vdev->hw->pll.max_ratio);
}

u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return pll_ratio_to_dpu_freq_mtl(pll_config_get_mtl(vdev));
	else
		return pll_ratio_to_dpu_freq_lnl(pll_config_get_lnl(vdev));
}

/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq)
{
	u32 status = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;
	bool schedule_recovery = false;

	if (!status)
		return false;

	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
		u32 pll = pll_config_get_mtl(vdev);

		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
			 pll, pll_ratio_to_dpu_freq_mtl(pll) / HZ_PER_MHZ);
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, status)) {
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));
		REGB_WR32(VPU_HW_BTRS_MTL_ATS_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, status)) {
		u32 ufi_log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
			 ufi_log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, ufi_log),
			 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, ufi_log),
			 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, ufi_log));
		REGB_WR32(VPU_HW_BTRS_MTL_UFI_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	/* This must be done after interrupts are cleared at the source. */
	if (IVPU_WA(interrupt_clear_with_0))
		/*
		 * Writing 1 triggers an interrupt, so we can't perform read update write.
		 * Clear local interrupt status by writing 0 to all bits.
		 */
		REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, 0x0);
	else
		REGB_WR32(VPU_HW_BTRS_MTL_INTERRUPT_STAT, status);

	if (schedule_recovery)
		ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");

	return true;
}

/* Handler for IRQs from Buttress core (irqB) */
bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
{
	u32 status = REGB_RD32(VPU_HW_BTRS_LNL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;
	bool schedule_recovery = false;

	if (!status)
		return false;

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
		ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
		queue_work(system_wq, &vdev->irq_dct_work);
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
		u32 pll = pll_config_get_lnl(vdev);

		ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq, wp %08x, %lu MHz",
			 pll, pll_ratio_to_dpu_freq_lnl(pll) / HZ_PER_MHZ);
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, status)) {
		ivpu_err(vdev, "ATS_ERR LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
			 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
			 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
		REGB_WR32(VPU_HW_BTRS_LNL_ATS_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, status)) {
		ivpu_err(vdev, "CFI0_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));
		REGB_WR32(VPU_HW_BTRS_LNL_CFI0_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, status)) {
		ivpu_err(vdev, "CFI1_ERR 0x%08x", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));
		REGB_WR32(VPU_HW_BTRS_LNL_CFI1_ERR_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, status)) {
		ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x",
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));
		REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_CLEAR, 0x1);
		schedule_recovery = true;
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, status)) {
		ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x",
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));
		REGB_WR32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_CLEAR, 0x1);
		schedule_recovery = true;
	}

	/* This must be done after interrupts are cleared at the source. */
	REGB_WR32(VPU_HW_BTRS_LNL_INTERRUPT_STAT, status);

	if (schedule_recovery)
		ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");

	return true;
}

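/*
 * Decode a DCT (duty cycle throttling) request from the PCODE mailbox shadow
 * register: the CMD field must be DCT_REQ and PARAM1 selects enable or
 * disable.
 */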
int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable)
{
	u32 val = REGB_RD32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW);
	u32 cmd = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, CMD, val);
	u32 param1 = REG_GET_FLD(VPU_HW_BTRS_LNL_PCODE_MAILBOX_SHADOW, PARAM1, val);

	if (cmd != DCT_REQ) {
		ivpu_err_ratelimited(vdev, "Unsupported PCODE command: 0x%x\n", cmd);
		return -EBADR;
	}

	switch (param1) {
	case DCT_ENABLE:
		*enable = true;
		return 0;
	case DCT_DISABLE:
		*enable = false;
		return 0;
	default:
		ivpu_err_ratelimited(vdev, "Invalid PARAM1 value: %u\n", param1);
		return -EINVAL;
	}
}

void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent)
{
	u32 val = 0;
	u32 cmd = enable ? DCT_ENABLE : DCT_DISABLE;

	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, CMD, DCT_REQ, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM1, cmd, val);
	val = REG_SET_FLD_NUM(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, PARAM2, active_percent, val);

	REGB_WR32(VPU_HW_BTRS_LNL_PCODE_MAILBOX_STATUS, val);
}

u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_OFFSET);
	else
		return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_OFFSET);
}

u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_SIZE);
	else
		return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_SIZE);
}

u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return REGB_RD32(VPU_HW_BTRS_MTL_VPU_TELEMETRY_ENABLE);
	else
		return REGB_RD32(VPU_HW_BTRS_LNL_VPU_TELEMETRY_ENABLE);
}

void ivpu_hw_btrs_global_int_disable(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
	else
		REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
}

void ivpu_hw_btrs_global_int_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
	else
		REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
}

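/*
 * Unmask the buttress error interrupts in the local mask and open the global
 * interrupt mask; ivpu_hw_btrs_irq_disable() below reverses both.
 */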
void ivpu_hw_btrs_irq_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
		REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, (u32)(~BTRS_MTL_IRQ_MASK));
		REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x0);
	} else {
		REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, (u32)(~BTRS_LNL_IRQ_MASK));
		REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x0);
	}
}

void ivpu_hw_btrs_irq_disable(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) {
		REGB_WR32(VPU_HW_BTRS_MTL_GLOBAL_INT_MASK, 0x1);
		REGB_WR32(VPU_HW_BTRS_MTL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
	} else {
		REGB_WR32(VPU_HW_BTRS_LNL_GLOBAL_INT_MASK, 0x1);
		REGB_WR32(VPU_HW_BTRS_LNL_LOCAL_INT_MASK, BTRS_IRQ_DISABLE_MASK);
	}
}

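/*
 * Dump any buttress error state still pending in INTERRUPT_STAT and the
 * corresponding error log registers; the status bits are only reported here,
 * not cleared.
 */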
static void diagnose_failure_mtl(struct ivpu_device *vdev)
{
	u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_MTL_IRQ_MASK;

	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, ATS_ERR, reg))
		ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(VPU_HW_BTRS_MTL_ATS_ERR_LOG_0));

	if (REG_TEST_FLD(VPU_HW_BTRS_MTL_INTERRUPT_STAT, UFI_ERR, reg)) {
		u32 log = REGB_RD32(VPU_HW_BTRS_MTL_UFI_ERR_LOG);

		ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
			 log, REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, OPCODE, log),
			 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, AXI_ID, log),
			 REG_GET_FLD(VPU_HW_BTRS_MTL_UFI_ERR_LOG, CQ_ID, log));
	}
}

static void diagnose_failure_lnl(struct ivpu_device *vdev)
{
	u32 reg = REGB_RD32(VPU_HW_BTRS_MTL_INTERRUPT_STAT) & BTRS_LNL_IRQ_MASK;

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, ATS_ERR, reg)) {
		ivpu_err(vdev, "ATS_ERR_LOG1 0x%08x ATS_ERR_LOG2 0x%08x\n",
			 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG1),
			 REGB_RD32(VPU_HW_BTRS_LNL_ATS_ERR_LOG2));
	}

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI0_ERR, reg))
		ivpu_err(vdev, "CFI0_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI0_ERR_LOG));

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, CFI1_ERR, reg))
		ivpu_err(vdev, "CFI1_ERR_LOG 0x%08x\n", REGB_RD32(VPU_HW_BTRS_LNL_CFI1_ERR_LOG));

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR0_ERR, reg))
		ivpu_err(vdev, "IMR_ERR_CFI0 LOW: 0x%08x HIGH: 0x%08x\n",
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_LOW),
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI0_HIGH));

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, IMR1_ERR, reg))
		ivpu_err(vdev, "IMR_ERR_CFI1 LOW: 0x%08x HIGH: 0x%08x\n",
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_LOW),
			 REGB_RD32(VPU_HW_BTRS_LNL_IMR_ERR_CFI1_HIGH));

	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, reg))
		ivpu_err(vdev, "Survivability IRQ\n");
}

void ivpu_hw_btrs_diagnose_failure(struct ivpu_device *vdev)
{
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		return diagnose_failure_mtl(vdev);
	else
		return diagnose_failure_lnl(vdev);
}

int ivpu_hw_btrs_platform_read(struct ivpu_device *vdev)
{
	u32 reg = REGB_RD32(VPU_HW_BTRS_LNL_VPU_STATUS);

	return REG_GET_FLD(VPU_HW_BTRS_LNL_VPU_STATUS, PLATFORM, reg);
}