GitHub Repository: CTCaer/hekate
Path: blob/master/bdk/sec/se.c

/*
 * Copyright (c) 2018 naehrwert
 * Copyright (c) 2018-2026 CTCaer
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <string.h>

#include "se.h"
#include <memory_map.h>
#include <soc/bpmp.h>
#include <soc/hw_init.h>
#include <soc/pmc.h>
#include <soc/timer.h>
#include <soc/t210.h>

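// SE DMA linked list with a single entry: num is the last entry index (kept
// at 0 for one buffer), followed by the buffer address and its 24-bit size.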
typedef struct _se_ll_t
{
	u32 num;
	u32 addr;
	u32 size;
} se_ll_t;

se_ll_t ll_src, ll_dst; // Must be u32 aligned.
se_ll_t *ll_src_ptr, *ll_dst_ptr;

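// GF(2^128) doubling on a big-endian 128-bit block: shift left by one bit and,
// on carry out, fold it back in by XORing 0x87 into the last byte. Used for
// CMAC subkey derivation and the per-block XTS tweak update below.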
static void _se_ls_1bit(void *buf)
{
	u8 *block = (u8 *)buf;
	u32 carry = 0;

	for (int i = SE_AES_BLOCK_SIZE - 1; i >= 0; i--)
	{
		u8 b = block[i];
		block[i] = (b << 1) | carry;
		carry = b >> 7;
	}

	if (carry)
		block[SE_AES_BLOCK_SIZE - 1] ^= 0x87;
}

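// Same doubling on a little-endian u32 word layout (the byte order standard
// XTS tweaks use), with the 0x87 reduction applied to the low word.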
static void _se_ls_1bit_le(void *buf)
{
	u32 *block = (u32 *)buf;
	u32 carry = 0;

	for (u32 i = 0; i < 4; i++)
	{
		u32 b = block[i];
		block[i] = (b << 1) | carry;
		carry = b >> 31;
	}

	if (carry)
		block[0x0] ^= 0x87;
}

static void _se_ll_set(se_ll_t *ll, u32 addr, u32 size)
{
	ll->num = 0;
	ll->addr = addr;
	ll->size = size & 0xFFFFFF;
}

static int _se_op_wait()
{
	bool tegra_t210 = hw_get_chip_id() == GP_HIDREV_MAJOR_T210;

	// Wait for operation to be done.
	while (!(SE(SE_INT_STATUS_REG) & SE_INT_OP_DONE))
		;

	// Check for errors.
	if ((SE(SE_INT_STATUS_REG) & SE_INT_ERR_STAT) ||
		(SE(SE_STATUS_REG) & SE_STATUS_STATE_MASK) != SE_STATUS_STATE_IDLE ||
		(SE(SE_ERR_STATUS_REG) != 0)
	   )
	{
		return 0;
	}

	// WAR: Coherency flushing.
	if (ll_dst_ptr)
	{
		// Ensure data is out from SE.
		if (tegra_t210)
			usleep(15); // Worst case scenario.
		else
		{
			// T210B01 has a status bit for that.
			u32 retries = 500000;
			while (SE(SE_STATUS_REG) & SE_STATUS_MEM_IF_BUSY)
			{
				if (!retries)
					return 0;
				usleep(1);
				retries--;
			}
		}

		// Ensure data is out from AHB.
		u32 retries = 500000;
		while (AHB_GIZMO(AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID) & MEM_WRQUE_SE_MST_ID)
		{
			if (!retries)
				return 0;
			usleep(1);
			retries--;
		}
	}

	return 1;
}

static int _se_execute_finalize()
{
	int res = _se_op_wait();

	// Invalidate data after OP is done.
	bpmp_mmu_maintenance(BPMP_MMU_MAINT_INVALID_WAY, false);

	return res;
}

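// Common OP dispatch: point the engine at the in/out linked lists, clear stale
// error/interrupt status, clean the BPMP cache so the SE sees the source data,
// then kick SE_OPERATION_REG. Oneshot calls block until the OP completes;
// async callers must follow up with _se_execute_finalize.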
static int _se_execute(u32 op, void *dst, u32 dst_size, const void *src, u32 src_size, bool is_oneshot)
{
	if (dst_size > SE_LL_MAX_SIZE || src_size > SE_LL_MAX_SIZE)
		return 0;

	ll_src_ptr = NULL;
	ll_dst_ptr = NULL;

	if (src)
	{
		ll_src_ptr = &ll_src;
		_se_ll_set(ll_src_ptr, (u32)src, src_size);
	}

	if (dst)
	{
		ll_dst_ptr = &ll_dst;
		_se_ll_set(ll_dst_ptr, (u32)dst, dst_size);
	}

	// Set linked list pointers.
	SE(SE_IN_LL_ADDR_REG) = (u32)ll_src_ptr;
	SE(SE_OUT_LL_ADDR_REG) = (u32)ll_dst_ptr;

	// Clear status.
	SE(SE_ERR_STATUS_REG) = SE(SE_ERR_STATUS_REG);
	SE(SE_INT_STATUS_REG) = SE(SE_INT_STATUS_REG);

	// Flush data before starting OP.
	bpmp_mmu_maintenance(BPMP_MMU_MAINT_CLEAN_WAY, false);

	SE(SE_OPERATION_REG) = op;

	if (is_oneshot)
		return _se_execute_finalize();

	return 1;
}

static int _se_execute_oneshot(u32 op, void *dst, u32 dst_size, const void *src, u32 src_size)
{
	return _se_execute(op, dst, dst_size, src, src_size, true);
}

static int _se_execute_aes_oneshot(void *dst, const void *src, u32 size)
{
	// Set optional memory interface.
	if (dst >= (void *)DRAM_START && src >= (void *)DRAM_START)
		SE(SE_CRYPTO_CONFIG_REG) |= SE_CRYPTO_MEMIF(MEMIF_MCCIF);

	u32 size_aligned = ALIGN_DOWN(size, SE_AES_BLOCK_SIZE);
	u32 size_residue = size % SE_AES_BLOCK_SIZE;
	int res = 1;

	// Handle initial aligned message.
	if (size_aligned)
	{
		SE(SE_CRYPTO_LAST_BLOCK_REG) = (size >> 4) - 1;

		res = _se_execute_oneshot(SE_OP_START, dst, size_aligned, src, size_aligned);
	}

	// Handle leftover partial message.
	if (res && size_residue)
	{
		// Copy message to a block sized buffer in case it's partial.
		u32 block[SE_AES_BLOCK_SIZE / sizeof(u32)] = {0};
		memcpy(block, src + size_aligned, size_residue);

		// Use updated IV for CBC and OFB. Ignored on others.
		SE(SE_CRYPTO_CONFIG_REG) |= SE_CRYPTO_IV_SEL(IV_UPDATED);

		SE(SE_CRYPTO_LAST_BLOCK_REG) = (SE_AES_BLOCK_SIZE >> 4) - 1;

		res = _se_execute_oneshot(SE_OP_START, block, SE_AES_BLOCK_SIZE, block, SE_AES_BLOCK_SIZE);

		// Copy result back.
		memcpy(dst + size_aligned, block, size_residue);
	}

	return res;
}

static void _se_aes_counter_set(const void *ctr)
{
	u32 data[SE_AES_IV_SIZE / sizeof(u32)];
	memcpy(data, ctr, SE_AES_IV_SIZE);

	for (u32 i = 0; i < SE_CRYPTO_LINEAR_CTR_REG_COUNT; i++)
		SE(SE_CRYPTO_LINEAR_CTR_REG + sizeof(u32) * i) = data[i];
}

void se_rsa_acc_ctrl(u32 rs, u32 flags)
{
	if (flags & SE_RSA_KEY_TBL_DIS_KEY_ACCESS_FLAG)
		SE(SE_RSA_KEYTABLE_ACCESS_REG + sizeof(u32) * rs) =
			(((flags >> 4) & SE_RSA_KEY_TBL_DIS_KEYUSE_FLAG) | (flags & SE_RSA_KEY_TBL_DIS_KEY_READ_UPDATE_FLAG)) ^
			SE_RSA_KEY_TBL_DIS_KEY_READ_UPDATE_USE_FLAG;
	if (flags & SE_RSA_KEY_LOCK_FLAG)
		SE(SE_RSA_SECURITY_PERKEY_REG) &= ~BIT(rs);
}

void se_key_acc_ctrl(u32 ks, u32 flags)
{
	if (flags & SE_KEY_TBL_DIS_KEY_ACCESS_FLAG)
		SE(SE_CRYPTO_KEYTABLE_ACCESS_REG + sizeof(u32) * ks) = ~flags;
	if (flags & SE_KEY_LOCK_FLAG)
		SE(SE_CRYPTO_SECURITY_PERKEY_REG) &= ~BIT(ks);
}

u32 se_key_acc_ctrl_get(u32 ks)
{
	return SE(SE_CRYPTO_KEYTABLE_ACCESS_REG + sizeof(u32) * ks);
}

void se_aes_key_set(u32 ks, const void *key, u32 size)
{
	u32 data[SE_AES_MAX_KEY_SIZE / sizeof(u32)];
	memcpy(data, key, size);

	for (u32 i = 0; i < (size / sizeof(u32)); i++)
	{
		// QUAD KEYS_4_7 bit is automatically set by PKT macro.
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(KEYS_0_3) | SE_KEYTABLE_PKT(i);
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = data[i];
	}
}

void se_aes_iv_set(u32 ks, const void *iv, u32 size)
{
	u32 data[SE_AES_MAX_KEY_SIZE / sizeof(u32)];
	memcpy(data, iv, size);

	for (u32 i = 0; i < (size / sizeof(u32)); i++)
	{
		// QUAD UPDATED_IV bit is automatically set by PKT macro.
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(ORIGINAL_IV) | SE_KEYTABLE_PKT(i);
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = data[i];
	}
}

void se_aes_key_get(u32 ks, void *key, u32 size)
{
	u32 data[SE_AES_MAX_KEY_SIZE / sizeof(u32)];

	for (u32 i = 0; i < (size / sizeof(u32)); i++)
	{
		// QUAD KEYS_4_7 bit is automatically set by PKT macro.
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(KEYS_0_3) | SE_KEYTABLE_PKT(i);
		data[i] = SE(SE_CRYPTO_KEYTABLE_DATA_REG);
	}

	memcpy(key, data, size);
}

void se_aes_key_clear(u32 ks)
{
	for (u32 i = 0; i < (SE_AES_MAX_KEY_SIZE / sizeof(u32)); i++)
	{
		// QUAD KEYS_4_7 bit is automatically set by PKT macro.
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(KEYS_0_3) | SE_KEYTABLE_PKT(i);
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = 0;
	}
}

void se_aes_iv_clear(u32 ks)
{
	for (u32 i = 0; i < (SE_AES_MAX_KEY_SIZE / sizeof(u32)); i++)
	{
		// QUAD UPDATED_IV bit is automatically set by PKT macro.
		SE(SE_CRYPTO_KEYTABLE_ADDR_REG) = SE_KEYTABLE_SLOT(ks) | SE_KEYTABLE_QUAD(ORIGINAL_IV) | SE_KEYTABLE_PKT(i);
		SE(SE_CRYPTO_KEYTABLE_DATA_REG) = 0;
	}
}

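// Decrypt a wrapped key with the key in ks_src and load the result directly
// into keyslot ks_dst (DST_KEYTABLE), so the unwrapped key never touches memory.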
int se_aes_unwrap_key(u32 ks_dst, u32 ks_src, const void *seed)
{
	SE(SE_CONFIG_REG) = SE_CONFIG_DEC_MODE(MODE_KEY128) | SE_CONFIG_DEC_ALG(ALG_AES_DEC) | SE_CONFIG_DST(DST_KEYTABLE);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks_src) | SE_CRYPTO_CORE_SEL(CORE_DECRYPT);
	SE(SE_CRYPTO_LAST_BLOCK_REG) = (SE_AES_BLOCK_SIZE >> 4) - 1;
	SE(SE_CRYPTO_KEYTABLE_DST_REG) = SE_KEYTABLE_DST_KEY_INDEX(ks_dst) | SE_KEYTABLE_DST_WORD_QUAD(KEYS_0_3);

	return _se_execute_oneshot(SE_OP_START, NULL, 0, seed, SE_KEY_128_SIZE);
}

int se_aes_crypt_ecb(u32 ks, int enc, void *dst, const void *src, u32 size)
{
	if (enc)
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
			SE_CRYPTO_XOR_POS(XOR_BYPASS);
	}
	else
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_DEC_MODE(MODE_KEY128) | SE_CONFIG_DEC_ALG(ALG_AES_DEC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_CORE_SEL(CORE_DECRYPT) |
			SE_CRYPTO_XOR_POS(XOR_BYPASS);
	}

	return _se_execute_aes_oneshot(dst, src, size);
}

int se_aes_crypt_cbc(u32 ks, int enc, void *dst, const void *src, u32 size)
{
	if (enc)
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_VCTRAM_SEL(VCTRAM_AESOUT) |
			SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) | SE_CRYPTO_XOR_POS(XOR_TOP);
	}
	else
	{
		SE(SE_CONFIG_REG) = SE_CONFIG_DEC_MODE(MODE_KEY128) | SE_CONFIG_DEC_ALG(ALG_AES_DEC) | SE_CONFIG_DST(DST_MEMORY);
		SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_VCTRAM_SEL(VCTRAM_PREVMEM) |
			SE_CRYPTO_CORE_SEL(CORE_DECRYPT) | SE_CRYPTO_XOR_POS(XOR_BOTTOM);
	}

	return _se_execute_aes_oneshot(dst, src, size);
}

int se_aes_crypt_ofb(u32 ks, void *dst, const void *src, u32 size)
{
	SE(SE_SPARE_REG) = SE_INPUT_NONCE_LE;
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_INPUT_SEL(INPUT_AESOUT) |
		SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) | SE_CRYPTO_XOR_POS(XOR_BOTTOM);

	return _se_execute_aes_oneshot(dst, src, size);
}

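/*
 * AES-CTR over a linear counter. Illustrative usage sketch only; the key slot,
 * buffers and the initial 16-byte counter block are whatever the caller sets up:
 *
 *   se_aes_key_set(slot, key, SE_KEY_128_SIZE);
 *   se_aes_crypt_ctr(slot, dst, src, size, ctr);
 */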
int se_aes_crypt_ctr(u32 ks, void *dst, const void *src, u32 size, void *ctr)
{
	SE(SE_SPARE_REG) = SE_INPUT_NONCE_LE;
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(ks) | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) |
		SE_CRYPTO_XOR_POS(XOR_BOTTOM) | SE_CRYPTO_INPUT_SEL(INPUT_LNR_CTR) |
		SE_CRYPTO_CTR_CNTN(1);

	_se_aes_counter_set(ctr);

	return _se_execute_aes_oneshot(dst, src, size);
}

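// XTS-AES for a single sector, one block at a time via ECB calls: the tweak is
// the sector number encoded as a big-endian block, ECB-encrypted with tweak_ks,
// then advanced with the big-endian doubling above after every block.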
int se_aes_crypt_xts_sec(u32 tweak_ks, u32 crypt_ks, int enc, u64 sec, void *dst, void *src, u32 secsize)
{
	int res = 0;
	u32 tmp[SE_AES_BLOCK_SIZE / sizeof(u32)];
	u8 *tweak = (u8 *)tmp;
	u8 *pdst = (u8 *)dst;
	u8 *psrc = (u8 *)src;

	// Generate tweak.
	for (int i = SE_AES_BLOCK_SIZE - 1; i >= 0; i--)
	{
		tweak[i] = sec & 0xFF;
		sec >>= 8;
	}
	if (!se_aes_crypt_ecb(tweak_ks, ENCRYPT, tweak, tweak, SE_AES_BLOCK_SIZE))
		goto out;

	// We are assuming a 0x10-aligned sector size in this implementation.
	for (u32 i = 0; i < secsize / SE_AES_BLOCK_SIZE; i++)
	{
		for (u32 j = 0; j < SE_AES_BLOCK_SIZE; j++)
			pdst[j] = psrc[j] ^ tweak[j];
		if (!se_aes_crypt_ecb(crypt_ks, enc, pdst, pdst, SE_AES_BLOCK_SIZE))
			goto out;
		for (u32 j = 0; j < SE_AES_BLOCK_SIZE; j++)
			pdst[j] = pdst[j] ^ tweak[j];
		_se_ls_1bit(tweak);
		psrc += SE_AES_BLOCK_SIZE;
		pdst += SE_AES_BLOCK_SIZE;
	}

	res = 1;

out:
	return res;
}

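// XTS-AES for a single sector with the whole sector pushed through the engine
// in one ECB pass: pre-XOR all blocks with the running tweak, encrypt/decrypt
// the sector, then post-XOR with a saved copy of the same tweak sequence. The
// tweak starts from the big-endian sector number but advances with the
// standard little-endian doubling.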
int se_aes_crypt_xts_sec_nx(u32 tweak_ks, u32 crypt_ks, int enc, u64 sec, u8 *tweak, bool regen_tweak, u32 tweak_exp, void *dst, void *src, u32 sec_size)
{
	u32 *pdst = (u32 *)dst;
	u32 *psrc = (u32 *)src;
	u32 *ptweak = (u32 *)tweak;

	if (regen_tweak)
	{
		for (int i = SE_AES_BLOCK_SIZE - 1; i >= 0; i--)
		{
			tweak[i] = sec & 0xFF;
			sec >>= 8;
		}
		if (!se_aes_crypt_ecb(tweak_ks, ENCRYPT, tweak, tweak, SE_AES_BLOCK_SIZE))
			return 0;
	}

	// tweak_exp allows using a saved tweak to reduce _se_ls_1bit_le calls.
	for (u32 i = 0; i < (tweak_exp << 5); i++)
		_se_ls_1bit_le(tweak);

	u8 orig_tweak[SE_KEY_128_SIZE] __attribute__((aligned(4)));
	memcpy(orig_tweak, tweak, SE_KEY_128_SIZE);

	// We are assuming a block-aligned (0x10) sector size in this implementation.
	for (u32 i = 0; i < (sec_size >> 4); i++)
	{
		for (u32 j = 0; j < (SE_AES_BLOCK_SIZE / sizeof(u32)); j++)
			pdst[j] = psrc[j] ^ ptweak[j];

		_se_ls_1bit_le(tweak);
		psrc += sizeof(u32);
		pdst += sizeof(u32);
	}

	if (!se_aes_crypt_ecb(crypt_ks, enc, dst, dst, sec_size))
		return 0;

	pdst = (u32 *)dst;
	ptweak = (u32 *)orig_tweak;
	for (u32 i = 0; i < (sec_size >> 4); i++)
	{
		for (u32 j = 0; j < (SE_AES_BLOCK_SIZE / sizeof(u32)); j++)
			pdst[j] = pdst[j] ^ ptweak[j];

		_se_ls_1bit_le(orig_tweak);
		pdst += sizeof(u32);
	}

	return 1;
}

int se_aes_crypt_xts(u32 tweak_ks, u32 crypt_ks, int enc, u64 sec, void *dst, void *src, u32 secsize, u32 num_secs)
{
	u8 *pdst = (u8 *)dst;
	u8 *psrc = (u8 *)src;

	for (u32 i = 0; i < num_secs; i++)
		if (!se_aes_crypt_xts_sec(tweak_ks, crypt_ks, enc, sec + i, pdst + secsize * i, psrc + secsize * i, secsize))
			return 0;

	return 1;
}

static void _se_sha_hash_256_get_hash(void *hash)
{
	// Copy output hash.
	u32 hash32[SE_SHA_256_SIZE / sizeof(u32)];
	for (u32 i = 0; i < (SE_SHA_256_SIZE / sizeof(u32)); i++)
		hash32[i] = byte_swap_32(SE(SE_HASH_RESULT_REG + sizeof(u32) * i));
	memcpy(hash, hash32, SE_SHA_256_SIZE);
}

static int _se_sha_hash_256(void *hash, u64 total_size, const void *src, u32 src_size, bool is_oneshot)
{
	// Src size of 0 is not supported, so return the SHA-256 of an empty string.
	if (!src_size)
	{
		const u8 null_hash[SE_SHA_256_SIZE] = {
			0xE3, 0xB0, 0xC4, 0x42, 0x98, 0xFC, 0x1C, 0x14, 0x9A, 0xFB, 0xF4, 0xC8, 0x99, 0x6F, 0xB9, 0x24,
			0x27, 0xAE, 0x41, 0xE4, 0x64, 0x9B, 0x93, 0x4C, 0xA4, 0x95, 0x99, 0x1B, 0x78, 0x52, 0xB8, 0x55
		};
		memcpy(hash, null_hash, SE_SHA_256_SIZE);
		return 1;
	}

	// Increase leftover size if not last message. (Engine will always stop at src_size.)
	u32 msg_left = src_size;
	if (total_size < src_size)
		msg_left++;

	// Setup config for SHA256.
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_SHA256) | SE_CONFIG_ENC_ALG(ALG_SHA) | SE_CONFIG_DST(DST_HASHREG);

	// Set total size: BITS(total_size), up to 2 EB.
	SE(SE_SHA_MSG_LENGTH_0_REG) = (u32)(total_size << 3);
	SE(SE_SHA_MSG_LENGTH_1_REG) = (u32)(total_size >> 29);
	SE(SE_SHA_MSG_LENGTH_2_REG) = 0;
	SE(SE_SHA_MSG_LENGTH_3_REG) = 0;

	// Set leftover size: BITS(src_size).
	SE(SE_SHA_MSG_LEFT_0_REG) = (u32)(msg_left << 3);
	SE(SE_SHA_MSG_LEFT_1_REG) = (u32)(msg_left >> 29);
	SE(SE_SHA_MSG_LEFT_2_REG) = 0;
	SE(SE_SHA_MSG_LEFT_3_REG) = 0;

	// Set config based on init or partial continuation.
	if (total_size == src_size || !total_size)
		SE(SE_SHA_CONFIG_REG) = SHA_INIT_HASH;
	else
		SE(SE_SHA_CONFIG_REG) = SHA_CONTINUE;

	// Trigger the operation. src vs total size decides if it's partial.
	int res = _se_execute(SE_OP_START, NULL, 0, src, src_size, is_oneshot);

	if (res && is_oneshot)
		_se_sha_hash_256_get_hash(hash);

	return res;
}

int se_sha_hash_256_async(void *hash, const void *src, u32 size)
{
	return _se_sha_hash_256(hash, size, src, size, false);
}

int se_sha_hash_256_oneshot(void *hash, const void *src, u32 size)
{
	return _se_sha_hash_256(hash, size, src, size, true);
}

int se_sha_hash_256_partial_start(void *hash, const void *src, u32 size, bool is_oneshot)
{
	// Check if aligned to SHA256 block size.
	if (size % SE_SHA2_MIN_BLOCK_SIZE)
		return 0;

	return _se_sha_hash_256(hash, 0, src, size, is_oneshot);
}

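// Continuation chunk: passing size - 1 as total_size makes it smaller than
// src_size, which _se_sha_hash_256 treats as "not the last message"
// (leftover size bumped, SHA_CONTINUE selected).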
int se_sha_hash_256_partial_update(void *hash, const void *src, u32 size, bool is_oneshot)
{
	// Check if aligned to SHA256 block size.
	if (size % SE_SHA2_MIN_BLOCK_SIZE)
		return 0;

	return _se_sha_hash_256(hash, size - 1, src, size, is_oneshot);
}

int se_sha_hash_256_partial_end(void *hash, u64 total_size, const void *src, u32 src_size, bool is_oneshot)
{
	return _se_sha_hash_256(hash, total_size, src, src_size, is_oneshot);
}

int se_sha_hash_256_finalize(void *hash)
{
	int res = _se_execute_finalize();

	_se_sha_hash_256_get_hash(hash);

	return res;
}

int se_rng_pseudo(void *dst, u32 size)
{
	// Setup config for SP 800-90 PRNG.
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_DST(DST_MEMORY);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_XOR_POS(XOR_BYPASS) | SE_CRYPTO_INPUT_SEL(INPUT_RANDOM);
	SE(SE_RNG_CONFIG_REG) = SE_RNG_CONFIG_SRC(SRC_ENTROPY) | SE_RNG_CONFIG_MODE(MODE_NORMAL);
	SE(SE_RNG_SRC_CONFIG_REG) |= SE_RNG_SRC_CONFIG_ENTR_SRC(RO_ENTR_ENABLE); // DRBG. Depends on ENTROPY clock.
	SE(SE_RNG_RESEED_INTERVAL_REG) = 4096;

	u32 size_aligned = ALIGN_DOWN(size, SE_RNG_BLOCK_SIZE);
	u32 size_residue = size % SE_RNG_BLOCK_SIZE;
	int res = 1;

	// Handle initial aligned message.
	if (size_aligned)
	{
		SE(SE_CRYPTO_LAST_BLOCK_REG) = (size >> 4) - 1;

		res = _se_execute_oneshot(SE_OP_START, dst, size_aligned, NULL, 0);
	}

	// Handle leftover partial message.
	if (res && size_residue)
	{
		// Generate a full block into a temp buffer and copy only the residue back.
		u32 block[SE_RNG_BLOCK_SIZE / sizeof(u32)] = {0};

		SE(SE_CRYPTO_LAST_BLOCK_REG) = (SE_AES_BLOCK_SIZE >> 4) - 1;

		res = _se_execute_oneshot(SE_OP_START, block, SE_RNG_BLOCK_SIZE, NULL, 0);

		// Copy result back.
		if (res)
			memcpy(dst + size_aligned, block, size_residue);
	}

	return res;
}

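// Dump the AES keyslots via the SE context save mechanism: generate a fresh
// SRK, context-save every keyslot (SRK-encrypted) into memory, fetch the SRK
// from the PMC secure scratch registers and use it to decrypt the blob.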
void se_aes_ctx_get_keys(u8 *buf, u8 *keys, u32 keysize)
{
	u8 *aligned_buf = (u8 *)ALIGN((u32)buf, 0x40);

	// Set Secure Random Key.
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_RNG) | SE_CONFIG_DST(DST_SRK);
	SE(SE_CRYPTO_CONFIG_REG) = SE_CRYPTO_KEY_INDEX(0) | SE_CRYPTO_CORE_SEL(CORE_ENCRYPT) | SE_CRYPTO_INPUT_SEL(INPUT_RANDOM);
	SE(SE_RNG_CONFIG_REG) = SE_RNG_CONFIG_SRC(SRC_ENTROPY) | SE_RNG_CONFIG_MODE(MODE_FORCE_RESEED);
	SE(SE_CRYPTO_LAST_BLOCK_REG) = 0;
	_se_execute_oneshot(SE_OP_START, NULL, 0, NULL, 0);

	// Save AES keys.
	SE(SE_CONFIG_REG) = SE_CONFIG_ENC_MODE(MODE_KEY128) | SE_CONFIG_ENC_ALG(ALG_AES_ENC) | SE_CONFIG_DST(DST_MEMORY);

	for (u32 i = 0; i < SE_AES_KEYSLOT_COUNT; i++)
	{
		SE(SE_CONTEXT_SAVE_CONFIG_REG) = SE_CONTEXT_SRC(AES_KEYTABLE) | SE_KEYTABLE_DST_KEY_INDEX(i) |
			SE_CONTEXT_AES_KEY_INDEX(0) | SE_CONTEXT_AES_WORD_QUAD(KEYS_0_3);

		SE(SE_CRYPTO_LAST_BLOCK_REG) = 0;
		_se_execute_oneshot(SE_OP_CTX_SAVE, aligned_buf, SE_AES_BLOCK_SIZE, NULL, 0);
		memcpy(keys + i * keysize, aligned_buf, SE_AES_BLOCK_SIZE);

		if (keysize > SE_KEY_128_SIZE)
		{
			SE(SE_CONTEXT_SAVE_CONFIG_REG) = SE_CONTEXT_SRC(AES_KEYTABLE) | SE_KEYTABLE_DST_KEY_INDEX(i) |
				SE_CONTEXT_AES_KEY_INDEX(0) | SE_CONTEXT_AES_WORD_QUAD(KEYS_4_7);

			SE(SE_CRYPTO_LAST_BLOCK_REG) = 0;
			_se_execute_oneshot(SE_OP_CTX_SAVE, aligned_buf, SE_AES_BLOCK_SIZE, NULL, 0);
			memcpy(keys + i * keysize + SE_AES_BLOCK_SIZE, aligned_buf, SE_AES_BLOCK_SIZE);
		}
	}

	// Save SRK to PMC secure scratches.
	SE(SE_CONTEXT_SAVE_CONFIG_REG) = SE_CONTEXT_SRC(SRK);
	SE(SE_CRYPTO_LAST_BLOCK_REG) = 0;
	_se_execute_oneshot(SE_OP_CTX_SAVE, NULL, 0, NULL, 0);

	// End context save.
	SE(SE_CONFIG_REG) = 0;
	_se_execute_oneshot(SE_OP_CTX_SAVE, NULL, 0, NULL, 0);

	// Get SRK.
	u32 srk[4];
	srk[0] = PMC(APBDEV_PMC_SECURE_SCRATCH4);
	srk[1] = PMC(APBDEV_PMC_SECURE_SCRATCH5);
	srk[2] = PMC(APBDEV_PMC_SECURE_SCRATCH6);
	srk[3] = PMC(APBDEV_PMC_SECURE_SCRATCH7);

	// Decrypt context.
	se_aes_key_set(3, srk, SE_KEY_128_SIZE);
	se_aes_crypt_cbc(3, DECRYPT, keys, keys, SE_AES_KEYSLOT_COUNT * keysize);
	se_aes_key_clear(3);
}

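// AES-CMAC (RFC 4493) using the hardware AES-CBC hash path: derive the K1/K2
// subkey by encrypting a zero block and doubling, run all but the last block
// through the engine, then pad the final block with 0x80 if partial, XOR it
// with the subkey and hash it to produce the tag.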
int se_aes_hash_cmac(u32 ks, void *hash, const void *src, u32 size)
{
	u32 tmp1[SE_KEY_128_SIZE / sizeof(u32)] = {0};
	u32 tmp2[SE_AES_BLOCK_SIZE / sizeof(u32)] = {0};
	u8 *subkey = (u8 *)tmp1;
	u8 *last_block = (u8 *)tmp2;

	// Generate sub key (CBC with zeroed IV, basically ECB).
	se_aes_iv_clear(ks);
	if (!se_aes_crypt_cbc(ks, ENCRYPT, subkey, subkey, SE_KEY_128_SIZE))
		return 0;

	// Generate K1 subkey.
	_se_ls_1bit(subkey);
	if (size & 0xF)
		_se_ls_1bit(subkey); // Convert to K2.

	// Switch to hash register. The rest of the config is already set.
	SE(SE_CONFIG_REG) |= SE_CONFIG_DST(DST_HASHREG);
	SE(SE_CRYPTO_CONFIG_REG) |= SE_CRYPTO_HASH(HASH_ENABLE);

	// Initial blocks.
	u32 num_blocks = (size + 0xF) >> 4;
	if (num_blocks > 1)
	{
		SE(SE_CRYPTO_LAST_BLOCK_REG) = num_blocks - 2;

		if (!_se_execute_oneshot(SE_OP_START, NULL, 0, src, size))
			return 0;

		// Use updated IV for next OP as a continuation.
		SE(SE_CRYPTO_CONFIG_REG) |= SE_CRYPTO_IV_SEL(IV_UPDATED);
	}

	// Last block.
	if (size & 0xF)
	{
		memcpy(last_block, src + (size & (~0xF)), size & 0xF);
		last_block[size & 0xF] = 0x80;
	}
	else if (size >= SE_AES_BLOCK_SIZE)
		memcpy(last_block, src + size - SE_AES_BLOCK_SIZE, SE_AES_BLOCK_SIZE);

	for (u32 i = 0; i < SE_KEY_128_SIZE; i++)
		last_block[i] ^= subkey[i];

	SE(SE_CRYPTO_LAST_BLOCK_REG) = (SE_AES_BLOCK_SIZE >> 4) - 1;

	int res = _se_execute_oneshot(SE_OP_START, NULL, 0, last_block, SE_AES_BLOCK_SIZE);

	// Copy output hash.
	if (res)
	{
		u32 *hash32 = (u32 *)hash;
		for (u32 i = 0; i < (SE_AES_CMAC_DIGEST_SIZE / sizeof(u32)); i++)
			hash32[i] = SE(SE_HASH_RESULT_REG + sizeof(u32) * i);
	}

	return res;
}