/*
 * Copyright (c) 2017-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: qdf_crypto.c
 *
 * This source file contains the Linux-specific definitions of the QDF
 * crypto APIs.
 */

/* Include Files */
#include "qdf_crypto.h"
#include <linux/export.h>
#include <crypto/hash.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#include <linux/ieee80211.h>
#include <qdf_module.h>

/* Function Definitions and Documentation */
#define MAX_HMAC_ELEMENT_CNT 10

/*
 * xor() - in-place XOR of two byte buffers
 * @a: destination buffer; receives a ^= b
 * @b: source buffer
 * @len: number of bytes to XOR
 */
static void xor(uint8_t *a, const uint8_t *b, size_t len)
{
	unsigned int i;

	for (i = 0; i < len; i++)
		a[i] ^= b[i];
}

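/**
 * qdf_get_hash() - compute an unkeyed hash over a vector of buffers
 * @type: hash algorithm name as understood by the kernel crypto API
 * @element_cnt: number of entries in @addr and @addr_len
 * @addr: array of pointers to the data elements to hash
 * @addr_len: array of per-element lengths in bytes
 * @hash: output buffer for the digest
 *
 * Return: 0 on success, negative errno on failure
 */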
int qdf_get_hash(uint8_t *type,
		 uint8_t element_cnt, uint8_t *addr[], uint32_t *addr_len,
		 int8_t *hash)
{
	return qdf_get_hmac_hash(type, NULL, 0, element_cnt,
				 addr, addr_len, hash);
}

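/**
 * qdf_get_hmac_hash() - compute an HMAC over a vector of buffers
 * @type: hash algorithm name as understood by the kernel crypto API
 * @key: HMAC key, or NULL for an unkeyed hash
 * @keylen: length of @key in bytes
 * @element_cnt: number of entries in @addr and @addr_len; at most
 *	MAX_HMAC_ELEMENT_CNT
 * @addr: array of pointers to the data elements to hash
 * @addr_len: array of per-element lengths in bytes
 * @hash: output buffer for the digest
 *
 * Return: 0 on success, negative errno on failure
 */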
int qdf_get_hmac_hash(uint8_t *type, uint8_t *key,
		      uint32_t keylen,
		      uint8_t element_cnt, uint8_t *addr[], uint32_t *addr_len,
		      int8_t *hash)
{
	int i;
	size_t src_len[MAX_HMAC_ELEMENT_CNT];

	if (element_cnt > MAX_HMAC_ELEMENT_CNT) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid element count %d"), element_cnt);
		return -EINVAL;
	}

	for (i = 0; i < element_cnt; i++)
		src_len[i] = addr_len[i];

	return qdf_get_keyed_hash(type, key, keylen, (const uint8_t **)addr,
				  src_len, element_cnt, hash);
}

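/**
 * qdf_default_hmac_sha256_kdf() - HMAC-SHA256 based key derivation
 * @secret: input keying material
 * @secret_len: length of @secret in bytes
 * @label: NUL-terminated label string mixed into each iteration
 * @optional_data: optional context data mixed into each iteration
 * @optional_data_len: length of @optional_data in bytes
 * @key: output buffer for the derived key
 * @keylen: number of bytes of key material to derive
 *
 * PRF+-style expansion: T1 = HMAC(secret, label || data || 0x01) and
 * Tn = HMAC(secret, T(n-1) || label || data || n); the Tn blocks are
 * concatenated and truncated to @keylen bytes.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAILURE otherwise
 */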
QDF_STATUS
qdf_default_hmac_sha256_kdf(uint8_t *secret, uint32_t secret_len,
			    uint8_t *label, uint8_t *optional_data,
			    uint32_t optional_data_len, uint8_t *key,
			    uint32_t keylen)
{
	uint8_t tmp_hash[SHA256_DIGEST_SIZE] = {0};
	uint8_t count = 1;
	uint8_t *addr[4];
	uint32_t len[4];
	uint32_t current_position = 0, remaining_data = SHA256_DIGEST_SIZE;

	addr[0] = tmp_hash;
	len[0] = SHA256_DIGEST_SIZE;
	addr[1] = label;
	len[1] = strlen(label) + 1;
	addr[2] = optional_data;
	len[2] = optional_data_len;
	addr[3] = &count;
	len[3] = 1;

	if (keylen == 0 ||
	    (keylen > (WLAN_MAX_PRF_INTERATIONS_COUNT * SHA256_DIGEST_SIZE))) {
		qdf_err("invalid key length %d", keylen);
		return QDF_STATUS_E_FAILURE;
	}

	/* Create T1 */
	if (qdf_get_hmac_hash(HMAC_SHA256_CRYPTO_TYPE, secret, secret_len, 3,
			      &addr[1], &len[1], tmp_hash) < 0) {
		qdf_err("failed to get hmac hash");
		return QDF_STATUS_E_FAILURE;
	}

	/* Copy T1 into the key, clamped to the requested length */
	if (remaining_data > keylen)
		remaining_data = keylen;
	qdf_mem_copy(key + current_position, tmp_hash, remaining_data);
	current_position += remaining_data;

	for (count = 2; current_position < keylen; count++) {
		remaining_data = keylen - current_position;
		if (remaining_data > SHA256_DIGEST_SIZE)
			remaining_data = SHA256_DIGEST_SIZE;

		/* Create T-n */
		if (qdf_get_hmac_hash(HMAC_SHA256_CRYPTO_TYPE, secret,
				      secret_len, 4, addr, len, tmp_hash) < 0) {
			qdf_err("failed to get hmac hash");
			return QDF_STATUS_E_FAILURE;
		}
		/* Update hash from tmp_hash */
		qdf_mem_copy(key + current_position, tmp_hash, remaining_data);
		current_position += remaining_data;
	}

	return QDF_STATUS_SUCCESS;
}

/* qdf_update_dbl from RFC 5297; length of d is AES_BLOCK_SIZE (128 bits) */
void qdf_update_dbl(uint8_t *d)
{
	int i;
	uint8_t msb, msb_prev = 0;

	/* left shift by 1 */
	for (i = AES_BLOCK_SIZE - 1; i >= 0; i--) {
		msb = d[i] & 0x80;
		d[i] = d[i] << 1;
		d[i] += msb_prev ? 1 : 0;
		msb_prev = msb;
	}

	/* if the shifted-out MSB was set, XOR in the reduction constant */
	if (msb)
		d[AES_BLOCK_SIZE - 1] ^= 0x87;
}

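/* xor_128() - out = a XOR b, over one AES block (16 bytes) */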
static inline void xor_128(const uint8_t *a, const uint8_t *b, uint8_t *out)
{
	uint8_t i;

	for (i = 0; i < AES_BLOCK_SIZE; i++)
		out[i] = a[i] ^ b[i];
}

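/* leftshift_onebit() - output = input << 1, over one AES block */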
static inline void leftshift_onebit(const uint8_t *input, uint8_t *output)
{
	int i, overflow = 0;

	for (i = (AES_BLOCK_SIZE - 1); i >= 0; i--) {
		output[i] = input[i] << 1;
		output[i] |= overflow;
		overflow = (input[i] & 0x80) ? 1 : 0;
	}
}

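/*
 * generate_subkey() - derive the CMAC subkeys K1 and K2 per RFC 4493:
 * L = AES-K(0^128), K1 = dbl(L), K2 = dbl(K1), where dbl() is a one-bit
 * left shift with a conditional XOR of the constant Rb (0x87 in the last
 * byte) when the shifted-out bit was set. K1 is used when the last
 * message block is complete, K2 when it needs padding.
 */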
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
static void
generate_subkey(struct crypto_aes_ctx *aes_ctx, uint8_t *k1, uint8_t *k2)
{
	uint8_t l[AES_BLOCK_SIZE] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	uint8_t tmp[AES_BLOCK_SIZE];
	const uint8_t const_rb[AES_BLOCK_SIZE] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87
	};
	const uint8_t const_zero[AES_BLOCK_SIZE] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	aes_encrypt(aes_ctx, l, const_zero);

	if ((l[0] & 0x80) == 0) { /* If MSB(l) = 0, then k1 = l << 1 */
		leftshift_onebit(l, k1);
	} else { /* Else k1 = ( l << 1 ) (+) Rb */
		leftshift_onebit(l, tmp);
		xor_128(tmp, const_rb, k1);
	}

	if ((k1[0] & 0x80) == 0) {
		leftshift_onebit(k1, k2);
	} else {
		leftshift_onebit(k1, tmp);
		xor_128(tmp, const_rb, k2);
	}
}
#else
static void
generate_subkey(struct crypto_cipher *tfm, uint8_t *k1, uint8_t *k2)
{
	uint8_t l[AES_BLOCK_SIZE], tmp[AES_BLOCK_SIZE];
	const uint8_t const_rb[AES_BLOCK_SIZE] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87
	};
	const uint8_t const_zero[AES_BLOCK_SIZE] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};

	crypto_cipher_encrypt_one(tfm, l, const_zero);

	if ((l[0] & 0x80) == 0) { /* If MSB(l) = 0, then k1 = l << 1 */
		leftshift_onebit(l, k1);
	} else { /* Else k1 = ( l << 1 ) (+) Rb */
		leftshift_onebit(l, tmp);
		xor_128(tmp, const_rb, k1);
	}

	if ((k1[0] & 0x80) == 0) {
		leftshift_onebit(k1, k2);
	} else {
		leftshift_onebit(k1, tmp);
		xor_128(tmp, const_rb, k2);
	}
}
#endif

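/*
 * padding() - 10^i padding from RFC 4493: copy the @length valid bytes
 * of the final partial block, append 0x80, and zero-fill the remainder.
 */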
static inline void padding(const uint8_t *lastb, uint8_t *pad, uint16_t length)
{
	uint8_t j;

	/* original last block */
	for (j = 0; j < AES_BLOCK_SIZE; j++) {
		if (j < length)
			pad[j] = lastb[j];
		else if (j == length)
			pad[j] = 0x80;
		else
			pad[j] = 0x00;
	}
}

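/**
 * qdf_crypto_aes_128_cmac() - compute an AES-128-CMAC MIC (RFC 4493)
 * @key: 128-bit AES key
 * @data: message to authenticate
 * @len: message length in bytes
 * @mic: output buffer; receives CMAC_TLEN bytes of the tag
 *
 * Return: 0 on success, negative errno on failure
 */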
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0))
int qdf_crypto_aes_128_cmac(const uint8_t *key, const uint8_t *data,
			    uint16_t len, uint8_t *mic)
{
	uint8_t x[AES_BLOCK_SIZE], y[AES_BLOCK_SIZE];
	uint8_t m_last[AES_BLOCK_SIZE], padded[AES_BLOCK_SIZE];
	uint8_t k1[AES_KEYSIZE_128], k2[AES_KEYSIZE_128];
	int cmp_blk;
	int i, num_block = (len + 15) / AES_BLOCK_SIZE;
	struct crypto_aes_ctx aes_ctx;
	int ret;

	/*
	 * Calculate MIC and then copy
	 */
	ret = aes_expandkey(&aes_ctx, key, AES_KEYSIZE_128);
	if (ret) {
		qdf_err("aes_expandkey failed (%d)", ret);
		return ret;
	}

	generate_subkey(&aes_ctx, k1, k2);

	if (num_block == 0) {
		num_block = 1;
		cmp_blk = 0;
	} else {
		cmp_blk = ((len % AES_BLOCK_SIZE) == 0) ? 1 : 0;
	}

	if (cmp_blk) {
		/* Last block is complete block */
		xor_128(&data[AES_BLOCK_SIZE * (num_block - 1)], k1, m_last);
	} else {
		/* Last block is not complete block */
		padding(&data[AES_BLOCK_SIZE * (num_block - 1)], padded,
			len % AES_BLOCK_SIZE);
		xor_128(padded, k2, m_last);
	}

	for (i = 0; i < AES_BLOCK_SIZE; i++)
		x[i] = 0;

	for (i = 0; i < (num_block - 1); i++) {
		/* y = Mi (+) x */
		xor_128(x, &data[AES_BLOCK_SIZE * i], y);
		/* x = AES-128(KEY, y) */
		aes_encrypt(&aes_ctx, x, y);
	}

	xor_128(x, m_last, y);
	aes_encrypt(&aes_ctx, x, y);
	memzero_explicit(&aes_ctx, sizeof(aes_ctx));

	memcpy(mic, x, CMAC_TLEN);

	return 0;
}
#else
int qdf_crypto_aes_128_cmac(const uint8_t *key, const uint8_t *data,
			    uint16_t len, uint8_t *mic)
{
	uint8_t x[AES_BLOCK_SIZE], y[AES_BLOCK_SIZE];
	uint8_t m_last[AES_BLOCK_SIZE], padded[AES_BLOCK_SIZE];
	uint8_t k1[AES_KEYSIZE_128], k2[AES_KEYSIZE_128];
	int cmp_blk;
	int i, num_block = (len + 15) / AES_BLOCK_SIZE;
	struct crypto_cipher *tfm;
	int ret;

	/*
	 * Calculate MIC and then copy
	 */
	tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		qdf_err("crypto_alloc_cipher failed (%d)", ret);
		return ret;
	}

	ret = crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (ret) {
		qdf_err("crypto_cipher_setkey failed (%d)", ret);
		crypto_free_cipher(tfm);
		return ret;
	}

	generate_subkey(tfm, k1, k2);

	if (num_block == 0) {
		num_block = 1;
		cmp_blk = 0;
	} else {
		cmp_blk = ((len % AES_BLOCK_SIZE) == 0) ? 1 : 0;
	}

	if (cmp_blk) {
		/* Last block is complete block */
		xor_128(&data[AES_BLOCK_SIZE * (num_block - 1)], k1, m_last);
	} else {
		/* Last block is not complete block */
		padding(&data[AES_BLOCK_SIZE * (num_block - 1)], padded,
			len % AES_BLOCK_SIZE);
		xor_128(padded, k2, m_last);
	}

	for (i = 0; i < AES_BLOCK_SIZE; i++)
		x[i] = 0;

	for (i = 0; i < (num_block - 1); i++) {
		/* y = Mi (+) x */
		xor_128(x, &data[AES_BLOCK_SIZE * i], y);
		/* x = AES-128(KEY, y) */
		crypto_cipher_encrypt_one(tfm, x, y);
	}

	xor_128(x, m_last, y);
	crypto_cipher_encrypt_one(tfm, x, y);

	crypto_free_cipher(tfm);

	memcpy(mic, x, CMAC_TLEN);

	return 0;
}
#endif

/**
 * set_desc_flags() - set flags variable in the shash_desc struct
 * @desc: pointer to shash_desc struct
 * @tfm: pointer to crypto_shash struct
 *
 * Set the flags variable in the shash_desc struct by getting the flag
 * from the crypto_shash struct. The flag is not actually used, prompting
 * its removal from kernel code in versions 5.2 and above. Thus, for
 * versions 5.2 and above, do not set the flags variable of shash_desc.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0))
static void set_desc_flags(struct shash_desc *desc, struct crypto_shash *tfm)
{
	desc->flags = crypto_shash_get_flags(tfm);
}
#else
static void set_desc_flags(struct shash_desc *desc, struct crypto_shash *tfm)
{
}
#endif

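/**
 * qdf_get_keyed_hash() - compute a hash over a vector of buffers
 * @alg: algorithm name understood by the kernel crypto API, e.g.
 *	"cmac(aes)"
 * @key: key material, or NULL for an unkeyed hash
 * @key_len: length of @key in bytes (0 for an unkeyed hash)
 * @src: array of pointers to the data elements to hash
 * @src_len: array of per-element lengths in bytes
 * @num_elements: number of entries in @src and @src_len
 * @out: output buffer for the digest
 *
 * Return: 0 on success, negative errno on failure
 */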
int qdf_get_keyed_hash(const char *alg, const uint8_t *key,
		       unsigned int key_len, const uint8_t *src[],
		       size_t *src_len, size_t num_elements, uint8_t *out)
{
	struct crypto_shash *tfm;
	int ret;
	size_t i;

	tfm = crypto_alloc_shash(alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to allocate transformation for %s: %ld"),
			  alg, PTR_ERR(tfm));
		return -EINVAL;
	}

	if (key && key_len) {
		ret = crypto_shash_setkey(tfm, key, key_len);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				  FL("Set key failed for %s, ret:%d"),
				  alg, -ret);
			goto error;
		}
	}

	do {
		SHASH_DESC_ON_STACK(desc, tfm);
		desc->tfm = tfm;
		set_desc_flags(desc, tfm);

		ret = crypto_shash_init(desc);
		if (ret) {
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to init hash for %s, ret:%d"),
				  alg, -ret);
			goto error;
		}

		for (i = 0; i < num_elements; i++) {
			ret = crypto_shash_update(desc, src[i], src_len[i]);
			if (ret) {
				QDF_TRACE(QDF_MODULE_ID_QDF,
					  QDF_TRACE_LEVEL_ERROR,
					  FL("Failed to update hash for %s, ret:%d"),
					  alg, -ret);
				goto error;
			}
		}

		ret = crypto_shash_final(desc, out);
		if (ret)
			QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
				  FL("Failed to get digest for %s, ret:%d"),
				  alg, -ret);
	} while (0);

error:
	crypto_free_shash(tfm);
	return ret;
}

qdf_export_symbol(qdf_get_keyed_hash);

/*
 * qdf_aes_s2v() - AES S2V (string-to-vector) from RFC 5297: computes a
 * synthetic IV over the vector of strings @s using AES-CMAC under @key.
 * 'out' must be AES_BLOCK_SIZE (16) bytes long.
 */
int qdf_aes_s2v(const uint8_t *key, unsigned int key_len, const uint8_t *s[],
		size_t s_len[], size_t num_s, uint8_t *out)
{
	const char *alg = "cmac(aes)";
	uint8_t d[AES_BLOCK_SIZE];
	uint8_t buf[AES_BLOCK_SIZE] = { 0 };
	size_t buf_len = AES_BLOCK_SIZE;
	const uint8_t *a[1];
	unsigned int i;
	uint8_t *t = NULL;
	size_t t_len;
	int ret;

	if (num_s == 0) {
		/* V = AES-CMAC(K, <one>) */
		buf[0] = 0x01;
		a[0] = buf;
		ret = qdf_get_keyed_hash(alg, key, key_len, a, &buf_len, 1,
					 out);
		return ret;
	}

	/* D = AES-CMAC(K, <zero>) */
	a[0] = buf;
	ret = qdf_get_keyed_hash(alg, key, key_len, a, &buf_len, 1, d);
	if (ret)
		goto error;

	for (i = 0; i < num_s - 1; i++) {
		/* D = qdf_update_dbl(D) xor AES-CMAC(K, Si) */
		qdf_update_dbl(d);
		ret = qdf_get_keyed_hash(alg, key, key_len, &s[i], &s_len[i], 1,
					 buf);
		if (ret)
			goto error;
		xor(d, buf, AES_BLOCK_SIZE);
	}

	if (s_len[i] >= AES_BLOCK_SIZE) {
		/* len(Sn) >= 128 */
		/* T = Sn xorend D */
		t = qdf_mem_malloc(s_len[i]);
		if (!t)
			return -EINVAL;
		qdf_mem_copy(t, s[i], s_len[i]);
		xor(t + s_len[i] - AES_BLOCK_SIZE, d, AES_BLOCK_SIZE);
		t_len = s_len[i];
	} else {
		/* len(Sn) < 128 */
		/* T = qdf_update_dbl(D) xor pad(Sn) */
		qdf_update_dbl(d);
		qdf_mem_zero(buf, AES_BLOCK_SIZE);
		qdf_mem_copy(buf, s[i], s_len[i]);
		buf[s_len[i]] = 0x80;
		/* XOR against the padded copy in buf, not the short s[i] */
		xor(d, buf, AES_BLOCK_SIZE);
		t = d;
		t_len = AES_BLOCK_SIZE;
	}

	/* V = AES-CMAC(K, T) */
	a[0] = t;
	ret = qdf_get_keyed_hash(alg, key, key_len, a, &t_len, 1, out);

error:
	if (t && t != d)
		qdf_mem_free(t);
	return ret;
}

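/*
 * qdf_aes_ctr() - AES-CTR encryption/decryption of @src into @dest using
 * @siv as the initial counter block. Together with qdf_aes_s2v() this
 * provides the two building blocks of AES-SIV (RFC 5297). Returns 0 on
 * success, negative errno on failure.
 */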
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0))
int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv,
		const uint8_t *src, size_t src_len, uint8_t *dest, bool enc)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg_in, sg_out;
	int ret;

	if (!IS_VALID_CTR_KEY_LEN(key_len)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid key length: %u"), key_len);
		return -EINVAL;
	}

	tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to alloc transformation for ctr(aes):%ld"),
			  PTR_ERR(tfm));
		return -EAGAIN;
	}

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to allocate request for ctr(aes)"));
		crypto_free_skcipher(tfm);
		return -EAGAIN;
	}

	ret = crypto_skcipher_setkey(tfm, key, key_len);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Set key failed for ctr(aes), ret:%d"), -ret);
		skcipher_request_free(req);
		crypto_free_skcipher(tfm);
		return ret;
	}

	sg_init_one(&sg_in, src, src_len);
	sg_init_one(&sg_out, dest, src_len);
	skcipher_request_set_crypt(req, &sg_in, &sg_out, src_len, siv);

	if (enc)
		ret = crypto_skcipher_encrypt(req);
	else
		ret = crypto_skcipher_decrypt(req);

	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("%s failed for ctr(aes), ret:%d"),
			  enc ? "Encryption" : "Decryption", -ret);
	}

	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return ret;
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv,
		const uint8_t *src, size_t src_len, uint8_t *dest, bool enc)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req = NULL;
	struct scatterlist sg_in, sg_out;
	int ret;

	if (!IS_VALID_CTR_KEY_LEN(key_len)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Invalid key length: %u"), key_len);
		return -EINVAL;
	}

	tfm = crypto_alloc_ablkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to alloc transformation for ctr(aes):%ld"),
			  PTR_ERR(tfm));
		return -EAGAIN;
	}

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Failed to allocate request for ctr(aes)"));
		crypto_free_ablkcipher(tfm);
		return -EAGAIN;
	}

	ret = crypto_ablkcipher_setkey(tfm, key, key_len);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("Set key failed for ctr(aes), ret:%d"), -ret);
		ablkcipher_request_free(req);
		crypto_free_ablkcipher(tfm);
		return ret;
	}

	sg_init_one(&sg_in, src, src_len);
	sg_init_one(&sg_out, dest, src_len);
	ablkcipher_request_set_crypt(req, &sg_in, &sg_out, src_len, siv);

	if (enc)
		ret = crypto_ablkcipher_encrypt(req);
	else
		ret = crypto_ablkcipher_decrypt(req);

	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  FL("%s failed for ctr(aes), ret:%d"),
			  enc ? "Encryption" : "Decryption", -ret);
	}

	ablkcipher_request_free(req);
	crypto_free_ablkcipher(tfm);

	return ret;
}
#else
int qdf_aes_ctr(const uint8_t *key, unsigned int key_len, uint8_t *siv,
		const uint8_t *src, size_t src_len, uint8_t *dest, bool enc)
{
	return -EINVAL;
}
#endif

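/**
 * qdf_crypto_aes_gmac() - compute an AES-GMAC message integrity code
 * @key: GMAC key
 * @key_length: key length in bytes
 * @iv: nonce/IV for the GCM operation
 * @aad: additional authenticated data (AAD_LEN bytes)
 * @data: frame body to authenticate
 * @data_len: length of @data in bytes
 * @mic: output buffer; receives IEEE80211_MMIE_GMAC_MICLEN bytes
 *
 * GMAC is realized as GCM with an empty plaintext: all input is passed
 * as associated data and only the authentication tag is produced.
 *
 * Return: 0 on success, negative errno on failure
 */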
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
int qdf_crypto_aes_gmac(const uint8_t *key, uint16_t key_length,
			uint8_t *iv, const uint8_t *aad,
			const uint8_t *data, uint16_t data_len, uint8_t *mic)
{
	struct crypto_aead *tfm;
	int ret = 0;
	struct scatterlist sg[4];
	uint16_t req_size;
	struct aead_request *req = NULL;
	uint8_t *aad_ptr, *input;

	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		tfm = NULL;
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "%s: crypto_alloc_aead failed (%d)", __func__, ret);
		goto err_tfm;
	}

	ret = crypto_aead_setkey(tfm, key, key_length);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "crypto_aead_setkey failed (%d)", ret);
		goto err_tfm;
	}

	ret = crypto_aead_setauthsize(tfm, IEEE80211_MMIE_GMAC_MICLEN);
	if (ret) {
		QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
			  "crypto_aead_setauthsize failed (%d)", ret);
		goto err_tfm;
	}

	/* Prepare aead request */
	req_size = sizeof(*req) + crypto_aead_reqsize(tfm) +
		   IEEE80211_MMIE_GMAC_MICLEN + AAD_LEN;
	req = qdf_mem_malloc(req_size);
	if (!req) {
		ret = -ENOMEM;
		goto err_tfm;
	}

	input = (uint8_t *)req + sizeof(*req) + crypto_aead_reqsize(tfm);
	aad_ptr = input + IEEE80211_MMIE_GMAC_MICLEN;
	qdf_mem_copy(aad_ptr, aad, AAD_LEN);

	/* Scatter list operations */
	sg_init_table(sg, 4);
	sg_set_buf(&sg[0], aad_ptr, AAD_LEN);
	sg_set_buf(&sg[1], data, data_len);
	sg_set_buf(&sg[2], input, IEEE80211_MMIE_GMAC_MICLEN);
	sg_set_buf(&sg[3], mic, IEEE80211_MMIE_GMAC_MICLEN);

	aead_request_set_tfm(req, tfm);
	aead_request_set_crypt(req, sg, sg, 0, iv);
	aead_request_set_ad(req,
			    AAD_LEN + data_len + IEEE80211_MMIE_GMAC_MICLEN);
	ret = crypto_aead_encrypt(req);

err_tfm:
	if (tfm)
		crypto_free_aead(tfm);

	if (req)
		qdf_mem_free(req);

	return ret;
}
#else
int qdf_crypto_aes_gmac(uint8_t *key, uint16_t key_length,
			uint8_t *iv, uint8_t *aad, uint8_t *data,
			uint16_t data_len, uint8_t *mic)
{
	return -EINVAL;
}
#endif
