diff --git a/src/common/aes/aes128_armv8.c b/src/common/aes/aes128_armv8.c
index d506e9d60..5edee57f6 100644
--- a/src/common/aes/aes128_armv8.c
+++ b/src/common/aes/aes128_armv8.c
@@ -14,6 +14,9 @@ typedef struct {
 } aes128ctx;
 
 void oqs_aes128_load_iv_armv8(const uint8_t *iv, size_t iv_len, void *_schedule) {
+    if (_schedule == NULL) {
+        return;
+    }
     aes128ctx *ctx = _schedule;
     if (iv_len == 12) {
         memcpy(ctx->iv, iv, 12);
@@ -63,12 +66,18 @@ static inline void aes128_armv8_encrypt(const unsigned char *rkeys, const unsign
 }
 
 void oqs_aes128_enc_sch_block_armv8(const uint8_t *plaintext, const void *_schedule, uint8_t *ciphertext) {
+    if (_schedule == NULL || plaintext == NULL || ciphertext == NULL) {
+        return;
+    }
     const unsigned char *schedule = (const unsigned char *) _schedule;
     aes128_armv8_encrypt(schedule, plaintext, ciphertext);
 }
 
 void oqs_aes128_ecb_enc_sch_armv8(const uint8_t *plaintext, const size_t plaintext_len, const void *schedule, uint8_t *ciphertext) {
     assert(plaintext_len % 16 == 0);
+    if (schedule == NULL || plaintext == NULL || ciphertext == NULL) {
+        return;
+    }
     const aes128ctx *ctx = (const aes128ctx *) schedule;
 
     for (size_t block = 0; block < plaintext_len / 16; block++) {
@@ -91,6 +100,9 @@ static uint32_t UINT32_TO_BE(const uint32_t x) {
 
 void oqs_aes128_ctr_enc_sch_upd_blks_armv8(void *schedule, uint8_t *out, size_t out_blks) {
+    if (schedule == NULL || out == NULL) {
+        return;
+    }
     aes128ctx *ctx = (aes128ctx *) schedule;
     uint8_t *block = ctx->iv;
     uint32_t ctr;
@@ -108,6 +120,9 @@ void oqs_aes128_ctr_enc_sch_upd_blks_armv8(void *schedule, uint8_t *out, size_t
 }
 
 void oqs_aes128_ctr_enc_sch_armv8(const uint8_t *iv, const size_t iv_len, const void *schedule, uint8_t *out, size_t out_len) {
+    if (iv == NULL || schedule == NULL || out == NULL) {
+        return;
+    }
     uint8_t block[16];
     uint32_t ctr;
     uint32_t ctr_be;
diff --git a/src/common/aes/aes128_ni.c b/src/common/aes/aes128_ni.c
index 5b7398a52..0d8fcfe48 100644
--- a/src/common/aes/aes128_ni.c
+++ b/src/common/aes/aes128_ni.c
@@ -49,14 +49,21 @@ static inline void aes128ni_setkey_encrypt(const unsigned char *key, __m128i rke
 }
 
 void oqs_aes128_load_schedule_ni(const uint8_t *key, void **_schedule) {
+    if (_schedule == NULL) {
+        return;
+    }
     *_schedule = OQS_MEM_malloc(sizeof(aes128ctx));
-    OQS_EXIT_IF_NULLPTR(*_schedule, "AES");
-    assert(*_schedule != NULL);
+    if (*_schedule == NULL) {
+        return;
+    }
     __m128i *schedule = ((aes128ctx *) *_schedule)->sk_exp;
     aes128ni_setkey_encrypt(key, schedule);
 }
 
 void oqs_aes128_load_iv_ni(const uint8_t *iv, size_t iv_len, void *_schedule) {
+    if (_schedule == NULL) {
+        return;
+    }
     aes128ctx *ctx = _schedule;
     __m128i idx = _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 7, 6, 5, 4, 3, 2, 1, 0);
     if (iv_len == 12) {
@@ -70,6 +77,9 @@ void oqs_aes128_load_iv_ni(const uint8_t *iv, size_t iv_len, void *_schedule) {
 }
 
 void oqs_aes128_load_iv_u64_ni(uint64_t iv, void *_schedule) {
+    if (_schedule == NULL) {
+        return;
+    }
     aes128ctx *ctx = _schedule;
     ctx->iv = _mm_loadl_epi64((__m128i *)&iv);
 }
@@ -133,11 +143,17 @@ static inline void aes128ni_encrypt_x4(const __m128i rkeys[11], __m128i n0,
 }
 
 void oqs_aes128_enc_sch_block_ni(const uint8_t *plaintext, const void *_schedule, uint8_t *ciphertext) {
+    if (_schedule == NULL) {
+        return;
+    }
     const __m128i *schedule = ((const aes128ctx *) _schedule)->sk_exp;
     aes128ni_encrypt(schedule, _mm_loadu_si128((const __m128i *)plaintext), ciphertext);
 }
 
 void oqs_aes128_ecb_enc_sch_ni(const uint8_t *plaintext, const size_t plaintext_len, const void *schedule, uint8_t *ciphertext) {
+    if (schedule == NULL) {
+        return;
+    }
     assert(plaintext_len % 16 == 0);
     for (size_t block = 0; block < plaintext_len / 16; block++) {
         oqs_aes128_enc_sch_block_ni(plaintext + (16 * block), schedule, ciphertext + (16 * block));
@@ -145,6 +161,9 @@ void oqs_aes128_ecb_enc_sch_ni(const uint8_t *plaintext, const size_t plaintext_
 }
 
 void oqs_aes128_ctr_enc_sch_upd_blks_ni(void *schedule, uint8_t *out, size_t out_blks) {
+    if (schedule == NULL) {
+        return;
+    }
     aes128ctx *ctx = (aes128ctx *) schedule;
     const __m128i mask = _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 7, 6, 5, 4, 3, 2, 1, 0);
 
@@ -168,6 +187,9 @@ void oqs_aes128_ctr_enc_sch_upd_blks_ni(void *schedule, uint8_t *out, size_t out
 }
 
 void oqs_aes128_ctr_enc_sch_ni(const uint8_t *iv, const size_t iv_len, const void *schedule, uint8_t *out, size_t out_len) {
+    if (schedule == NULL) {
+        return;
+    }
     __m128i block;
     __m128i mask = _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 7, 6, 5, 4, 3, 2, 1, 0);
     if (iv_len == 12) {
diff --git a/src/common/aes/aes256_armv8.c b/src/common/aes/aes256_armv8.c
index e7c34baff..6438b083c 100644
--- a/src/common/aes/aes256_armv8.c
+++ b/src/common/aes/aes256_armv8.c
@@ -14,6 +14,9 @@ typedef struct {
 } aes256ctx_nobitslice;
 
 void oqs_aes256_load_iv_armv8(const uint8_t *iv, size_t iv_len, void *_schedule) {
+    if (_schedule == NULL) {
+        return;
+    }
     aes256ctx_nobitslice *ctx = _schedule;
     if (iv_len == 12) {
         memcpy(ctx->iv, iv, 12);
@@ -70,11 +73,17 @@ static inline void aes256_armv8_encrypt(const unsigned char *rkeys, const unsign
 }
 
 void oqs_aes256_enc_sch_block_armv8(const uint8_t *plaintext, const void *_schedule, uint8_t *ciphertext) {
+    if (_schedule == NULL) {
+        return;
+    }
     const unsigned char *schedule = (const unsigned char *) ((const aes256ctx_nobitslice *) _schedule)->sk_exp;
     aes256_armv8_encrypt(schedule, plaintext, ciphertext);
 }
 
 void oqs_aes256_ecb_enc_sch_armv8(const uint8_t *plaintext, const size_t plaintext_len, const void *schedule, uint8_t *ciphertext) {
+    if (schedule == NULL) {
+        return;
+    }
     assert(plaintext_len % 16 == 0);
     for (size_t block = 0; block < plaintext_len / 16; block++) {
         oqs_aes256_enc_sch_block_armv8(plaintext + (16 * block), schedule, ciphertext + (16 * block));
@@ -95,6 +104,9 @@ static uint32_t UINT32_TO_BE(const uint32_t x) {
 #define BE_TO_UINT32(n) (uint32_t)((((uint8_t *) &(n))[0] << 24) | (((uint8_t *) &(n))[1] << 16) | (((uint8_t *) &(n))[2] << 8) | (((uint8_t *) &(n))[3] << 0))
 
 void oqs_aes256_ctr_enc_sch_upd_blks_armv8(void *schedule, uint8_t *out, size_t out_blks) {
+    if (schedule == NULL) {
+        return;
+    }
     aes256ctx_nobitslice *ctx = (aes256ctx_nobitslice *) schedule;
     uint8_t *block = ctx->iv;
     uint32_t ctr;
@@ -112,6 +124,9 @@ void oqs_aes256_ctr_enc_sch_upd_blks_armv8(void *schedule, uint8_t *out, size_t
 }
 
 void oqs_aes256_ctr_enc_sch_armv8(const uint8_t *iv, const size_t iv_len, const void *schedule, uint8_t *out, size_t out_len) {
+    if (schedule == NULL || iv == NULL || out == NULL) {
+        return;
+    }
     uint8_t block[16];
     uint32_t ctr;
     uint32_t ctr_be;
diff --git a/src/common/aes/aes256_ni.c b/src/common/aes/aes256_ni.c
index d5ae56e2a..266fc98a0 100644
--- a/src/common/aes/aes256_ni.c
+++ b/src/common/aes/aes256_ni.c
@@ -76,14 +76,21 @@ static inline void aes256ni_setkey_encrypt(const unsigned char *key, __m128i rke
 }
 
 void oqs_aes256_load_schedule_ni(const uint8_t *key, void **_schedule) {
+    if (_schedule == NULL) {
+        return;
+    }
     *_schedule = OQS_MEM_malloc(sizeof(aes256ctx));
-    OQS_EXIT_IF_NULLPTR(*_schedule, "AES");
-    assert(*_schedule != NULL);
+    if (*_schedule == NULL) {
+        return;
+    }
     __m128i *schedule = ((aes256ctx *) *_schedule)->sk_exp;
     aes256ni_setkey_encrypt(key, schedule);
 }
 
 void oqs_aes256_load_iv_ni(const uint8_t *iv, size_t iv_len, void *_schedule) {
+    if (_schedule == NULL) {
+        return;
+    }
     aes256ctx *ctx = _schedule;
     __m128i idx = _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 7, 6, 5, 4, 3, 2, 1, 0);
     if (iv_len == 12) {
@@ -97,6 +104,9 @@ void oqs_aes256_load_iv_ni(const uint8_t *iv, size_t iv_len, void *_schedule) {
 }
 
 void oqs_aes256_load_iv_u64_ni(uint64_t iv, void *_schedule) {
+    if (_schedule == NULL) {
+        return;
+    }
     aes256ctx *ctx = _schedule;
     ctx->iv = _mm_loadl_epi64((__m128i *)&iv);
 }
@@ -167,11 +177,16 @@ static inline void aes256ni_encrypt_x4(const __m128i rkeys[15], __m128i n0, __m1
 }
 
 void oqs_aes256_enc_sch_block_ni(const uint8_t *plaintext, const void *_schedule, uint8_t *ciphertext) {
+    if (_schedule == NULL || plaintext == NULL || ciphertext == NULL) {
+        return;
+    }
     const __m128i *schedule = ((const aes256ctx *) _schedule)->sk_exp;
     aes256ni_encrypt(schedule, _mm_loadu_si128((const __m128i *)plaintext), ciphertext);
 }
-
 void oqs_aes256_ecb_enc_sch_ni(const uint8_t *plaintext, const size_t plaintext_len, const void *schedule, uint8_t *ciphertext) {
+    if (plaintext == NULL || schedule == NULL || ciphertext == NULL) {
+        return;
+    }
     assert(plaintext_len % 16 == 0);
     for (size_t block = 0; block < plaintext_len / 16; block++) {
         oqs_aes256_enc_sch_block_ni(plaintext + (16 * block), schedule, ciphertext + (16 * block));
@@ -179,6 +194,9 @@ void oqs_aes256_ecb_enc_sch_ni(const uint8_t *plaintext, const size_t plaintext_
 }
 
 void oqs_aes256_ctr_enc_sch_upd_blks_ni(void *schedule, uint8_t *out, size_t out_blks) {
+    if (schedule == NULL || out == NULL) {
+        return;
+    }
     aes256ctx *ctx = (aes256ctx *) schedule;
     const __m128i mask = _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 7, 6, 5, 4, 3, 2, 1, 0);
 
@@ -202,6 +220,9 @@ void oqs_aes256_ctr_enc_sch_upd_blks_ni(void *schedule, uint8_t *out, size_t out
 }
 
 void oqs_aes256_ctr_enc_sch_ni(const uint8_t *iv, const size_t iv_len, const void *schedule, uint8_t *out, size_t out_len) {
+    if (iv == NULL || schedule == NULL || out == NULL) {
+        return;
+    }
     __m128i block;
     __m128i mask = _mm_set_epi8(8, 9, 10, 11, 12, 13, 14, 15, 7, 6, 5, 4, 3, 2, 1, 0);
     if (iv_len == 12) {
diff --git a/src/common/aes/aes_c.c b/src/common/aes/aes_c.c
index df75b6322..5d945f899 100644
--- a/src/common/aes/aes_c.c
+++ b/src/common/aes/aes_c.c
@@ -59,6 +59,9 @@ typedef struct {
 } aes256ctx_nobitslice;
 
 static inline uint32_t br_dec32le(const unsigned char *src) {
+    if (src == NULL) {
+        return 0;
+    }
     return (uint32_t)src[0]
            | ((uint32_t)src[1] << 8)
            | ((uint32_t)src[2] << 16)
@@ -67,8 +70,11 @@ static inline uint32_t br_dec32le(const unsigned char *src) {
 
 static void br_range_dec32le(uint32_t *v, size_t num, const unsigned char *src) {
+    if (v == NULL || src == NULL) {
+        return;
+    }
     while (num-- > 0) {
-        *v ++ = br_dec32le(src);
+        *v++ = br_dec32le(src);
         src += 4;
     }
 }
@@ -82,6 +88,9 @@ static inline uint32_t br_swap32(uint32_t x) {
 
 static inline void br_enc32le(unsigned char *dst, uint32_t x) {
+    if (dst == NULL) {
+        return;
+    }
     dst[0] = (unsigned char)x;
     dst[1] = (unsigned char)(x >> 8);
     dst[2] = (unsigned char)(x >> 16);
@@ -89,6 +98,9 @@ static inline void br_enc32le(unsigned char *dst, uint32_t x) {
 }
 
 static inline void br_enc32be(unsigned char *dst, uint32_t x) {
+    if (dst == NULL) {
+        return;
+    }
     dst[0] = (unsigned char)(x >> 24);
     dst[1] = (unsigned char)(x >> 16);
     dst[2] = (unsigned char)(x >> 8);
@@ -96,14 +108,20 @@ static inline void br_enc32be(unsigned char *dst, uint32_t x) {
 }
 
 static void br_range_enc32le(unsigned char *dst, const uint32_t *v, size_t num) {
+    if (dst == NULL || v == NULL) {
+        return;
+    }
     while (num-- > 0) {
-        br_enc32le(dst, *v ++);
+        br_enc32le(dst, *v++);
         dst += 4;
     }
 }
 
 static void br_aes_ct64_bitslice_Sbox(uint64_t *q) {
+    if (q == NULL) {
+        return;
+    }
     /*
      * This S-box implementation is a straightforward translation of
      * the circuit described by Boyar and Peralta in "A new
@@ -278,6 +296,10 @@ static void br_aes_ct64_bitslice_Sbox(uint64_t *q) {
 }
 
 static void br_aes_ct64_ortho(uint64_t *q) {
+    if (q == NULL) {
+        return;
+    }
+
 #define SWAPN(cl, ch, s, x, y) do { \
         uint64_t a, b; \
         a = (x); \
@@ -308,6 +330,10 @@ static void br_aes_ct64_ortho(uint64_t *q) {
 
 static void br_aes_ct64_interleave_in(uint64_t *q0, uint64_t *q1, const uint32_t *w) {
+    if (q0 == NULL || q1 == NULL || w == NULL) {
+        return;
+    }
+
     uint64_t x0, x1, x2, x3;
 
     x0 = w[0];
@@ -336,6 +362,10 @@ static void br_aes_ct64_interleave_in(uint64_t *q0, uint64_t *q1, const uint32_t
 
 static void br_aes_ct64_interleave_out(uint32_t *w, uint64_t q0, uint64_t q1) {
+    if (w == NULL) {
+        return;
+    }
+
     uint64_t x0, x1, x2, x3;
 
     x0 = q0 & (uint64_t)0x00FF00FF00FF00FF;
@@ -370,8 +400,11 @@ static uint32_t sub_word(uint32_t x) {
     br_aes_ct64_ortho(q);
     return (uint32_t)q[0];
 }
-
 static void br_aes_ct64_keysched(uint64_t *comp_skey, const unsigned char *key, unsigned int key_len) {
+    if (comp_skey == NULL || key == NULL) {
+        return;
+    }
+
     unsigned int i, j, k, nk, nkf;
     uint32_t tmp;
     uint32_t skey[60];
@@ -421,6 +454,10 @@ static void br_aes_ct64_keysched(uint64_t *comp_skey, const unsigned char *key,
 }
 
 static void br_aes_ct64_skey_expand(uint64_t *skey, const uint64_t *comp_skey, unsigned int nrounds) {
+    if (skey == NULL || comp_skey == NULL) {
+        return;
+    }
+
     unsigned u, v, n;
 
     n = (nrounds + 1) << 1;
@@ -444,6 +481,10 @@ static void br_aes_ct64_skey_expand(uint64_t *skey, const uint64_t *comp_skey, u
 
 static inline void add_round_key(uint64_t *q, const uint64_t *sk) {
+    if (q == NULL || sk == NULL) {
+        return;
+    }
+
     q[0] ^= sk[0];
     q[1] ^= sk[1];
     q[2] ^= sk[2];
@@ -455,6 +496,10 @@ static inline void add_round_key(uint64_t *q, const uint64_t *sk) {
 }
 
 static inline void shift_rows(uint64_t *q) {
+    if (q == NULL) {
+        return;
+    }
+
     int i;
 
     for (i = 0; i < 8; i ++) {
@@ -476,6 +521,10 @@ static inline uint64_t rotr32(uint64_t x) {
 }
 
 static inline void mix_columns(uint64_t *q) {
+    if (q == NULL) {
+        return;
+    }
+
     uint64_t q0, q1, q2, q3, q4, q5, q6, q7;
     uint64_t r0, r1, r2, r3, r4, r5, r6, r7;
 
@@ -508,12 +557,20 @@ static inline void mix_columns(uint64_t *q) {
 
 static void inc4_be(uint32_t *x) {
+    if (x == NULL) {
+        return;
+    }
+
     uint32_t t = br_swap32(*x) + 4;
     *x = br_swap32(t);
 }
 
 static void aes_ecb4x(unsigned char out[64], const uint32_t ivw[16], const uint64_t *sk_exp, unsigned int nrounds) {
+    if (out == NULL || ivw == NULL || sk_exp == NULL) {
+        return;
+    }
+
     uint32_t w[16];
     uint64_t q[8];
     unsigned int i;
@@ -543,8 +600,10 @@ static void aes_ecb4x(unsigned char out[64], const uint32_t ivw[16], const uint6
     br_range_enc32le(out, w, 16);
 }
-
 static void aes_ctr4x(unsigned char out[64], uint32_t ivw[16], const uint64_t *sk_exp, unsigned int nrounds) {
+    if (out == NULL || ivw == NULL || sk_exp == NULL) {
+        return;
+    }
     aes_ecb4x(out, ivw, sk_exp, nrounds);
 
     /* Increase counter for next 4 blocks */
@@ -556,6 +615,9 @@ static void aes_ctr4x(unsigned char out[64], uint32_t ivw[16], const uint64_t *s
 
 static void aes_ecb(unsigned char *out, const unsigned char *in, size_t nblocks, const uint64_t *rkeys, unsigned int nrounds) {
+    if (out == NULL || in == NULL || rkeys == NULL) {
+        return;
+    }
     uint32_t blocks[16];
     unsigned char t[64];
 
@@ -575,6 +637,9 @@ static void aes_ecb(unsigned char *out, const unsigned char *in, size_t nblocks,
 }
 
 static inline void aes128_ctr_upd_blks(unsigned char *out, size_t outblks, aes128ctx *ctx) {
+    if (out == NULL || ctx == NULL) {
+        return;
+    }
     uint32_t ivw[16];
     size_t i;
     uint32_t cc;
@@ -608,6 +673,9 @@ static inline void aes128_ctr_upd_blks(unsigned char *out, size_t outblks, aes12
 }
 
 static inline void aes256_ctr_upd_blks(unsigned char *out, size_t outblks, aes256ctx *ctx) {
+    if (out == NULL || ctx == NULL) {
+        return;
+    }
     uint32_t ivw[16];
     size_t i;
     uint32_t cc;
@@ -641,6 +709,9 @@ static inline void aes256_ctr_upd_blks(unsigned char *out, size_t outblks, aes25
 }
 
 static void aes_ctr(unsigned char *out, size_t outlen, const unsigned char *iv, const size_t iv_len, const uint64_t *rkeys, unsigned int nrounds) {
+    if (out == NULL || iv == NULL || rkeys == NULL) {
+        return;
+    }
     uint32_t ivw[16];
     size_t i;
     uint32_t cc;
@@ -676,8 +747,13 @@ static void aes_ctr(unsigned char *out, size_t outlen, const unsigned char *iv,
 }
 
 void oqs_aes128_load_schedule_c(const uint8_t *key, void **_schedule) {
+    if (key == NULL || _schedule == NULL) {
+        return;
+    }
     *_schedule = OQS_MEM_malloc(sizeof(aes128ctx));
-    OQS_EXIT_IF_NULLPTR(*_schedule, "AES");
+    if (*_schedule == NULL) {
+        return;
+    }
     aes128ctx *ctx = (aes128ctx *) *_schedule;
     uint64_t skey[22];
     br_aes_ct64_keysched(skey, key, 16);
@@ -685,8 +761,13 @@ void oqs_aes128_load_schedule_c(const uint8_t *key, void **_schedule) {
 }
 
 void oqs_aes256_load_schedule_c(const uint8_t *key, void **_schedule) {
+    if (key == NULL || _schedule == NULL) {
+        return;
+    }
     *_schedule = OQS_MEM_malloc(sizeof(aes256ctx));
-    OQS_EXIT_IF_NULLPTR(*_schedule, "AES");
+    if (*_schedule == NULL) {
+        return;
+    }
     aes256ctx *ctx = (aes256ctx *) *_schedule;
     uint64_t skey[30];
     br_aes_ct64_keysched(skey, key, 32);
@@ -694,6 +775,9 @@ void oqs_aes256_load_schedule_c(const uint8_t *key, void **_schedule) {
 }
 
 static void aes_keysched_no_bitslice(uint32_t *skey, const unsigned char *key, unsigned int key_len) {
+    if (skey == NULL || key == NULL) {
+        return;
+    }
     unsigned int i, j, k, nk, nkf;
     uint32_t tmp;
     unsigned nrounds = 10 + ((key_len - 16) >> 2);
@@ -719,13 +803,21 @@ static void aes_keysched_no_bitslice(uint32_t *skey, const unsigned char *key, u
 }
 
 void oqs_aes256_load_schedule_no_bitslice(const uint8_t *key, void **_schedule) {
+    if (key == NULL || _schedule == NULL) {
+        return;
+    }
     *_schedule = OQS_MEM_malloc(sizeof(aes256ctx_nobitslice));
-    assert(*_schedule != NULL);
+    if (*_schedule == NULL) {
+        return;
+    }
     uint32_t *schedule = ((aes256ctx_nobitslice *) *_schedule)->sk_exp;
     aes_keysched_no_bitslice(schedule, (const unsigned char *) key, 32);
 }
 
 void oqs_aes256_load_iv_c(const uint8_t *iv, size_t iv_len, void *_schedule) {
+    if (iv == NULL || _schedule == NULL) {
+        return;
+    }
     aes256ctx *ctx = _schedule;
     if (iv_len == 12) {
         memcpy(ctx->iv, iv, 12);
@@ -736,9 +828,10 @@ void oqs_aes256_load_iv_c(const uint8_t *iv, size_t iv_len, void *_schedule) {
         return; /* TODO: better error handling */
     }
 }
-
 void oqs_aes256_load_iv_u64_c(uint64_t iv, void *schedule) {
-    OQS_EXIT_IF_NULLPTR(schedule, "AES");
+    if (schedule == NULL) {
+        return;
+    }
     aes256ctx *ctx = (aes256ctx *) schedule;
     ctx->iv[7] = (unsigned char)(iv >> 56);
     ctx->iv[6] = (unsigned char)(iv >> 48);
@@ -752,13 +845,21 @@ void oqs_aes256_load_iv_u64_c(uint64_t iv, void *schedule) {
 }
 
 void oqs_aes128_load_schedule_no_bitslice(const uint8_t *key, void **_schedule) {
+    if (key == NULL || _schedule == NULL) {
+        return;
+    }
     *_schedule = OQS_MEM_malloc(44 * sizeof(int));
-    assert(*_schedule != NULL);
+    if (*_schedule == NULL) {
+        return;
+    }
     uint32_t *schedule = (uint32_t *) *_schedule;
     aes_keysched_no_bitslice(schedule, (const unsigned char *) key, 16);
 }
 
 void oqs_aes128_load_iv_c(const uint8_t *iv, size_t iv_len, void *_schedule) {
+    if (iv == NULL || _schedule == NULL) {
+        return;
+    }
     aes128ctx *ctx = _schedule;
     if (iv_len == 12) {
         memcpy(ctx->iv, iv, 12);
@@ -771,7 +872,9 @@ void oqs_aes128_load_iv_c(const uint8_t *iv, size_t iv_len, void *_schedule) {
 }
 
 void oqs_aes128_load_iv_u64_c(uint64_t iv, void *schedule) {
-    OQS_EXIT_IF_NULLPTR(schedule, "AES");
+    if (schedule == NULL) {
+        return;
+    }
     aes128ctx *ctx = (aes128ctx *) schedule;
     ctx->iv[7] = (unsigned char)(iv >> 56);
     ctx->iv[6] = (unsigned char)(iv >> 48);
@@ -785,33 +888,49 @@ void oqs_aes128_load_iv_u64_c(uint64_t iv, void *schedule) {
 }
 
 void oqs_aes128_ecb_enc_sch_c(const uint8_t *plaintext, const size_t plaintext_len, const void *schedule, uint8_t *ciphertext) {
-    assert(plaintext_len % 16 == 0);
+    if (plaintext == NULL || schedule == NULL || ciphertext == NULL || plaintext_len % 16 != 0) {
+        return;
+    }
     const aes128ctx *ctx = (const aes128ctx *) schedule;
     aes_ecb(ciphertext, plaintext, plaintext_len / 16, ctx->sk_exp, 10);
 }
 
 void oqs_aes128_ctr_enc_sch_c(const uint8_t *iv, const size_t iv_len, const void *schedule, uint8_t *out, size_t out_len) {
+    if (iv == NULL || schedule == NULL || out == NULL) {
+        return;
+    }
     const aes128ctx *ctx = (const aes128ctx *) schedule;
     aes_ctr(out, out_len, iv, iv_len, ctx->sk_exp, 10);
 }
 
 void oqs_aes128_ctr_enc_sch_upd_blks_c(void *schedule, uint8_t *out, size_t out_blks) {
+    if (schedule == NULL || out == NULL) {
+        return;
+    }
     aes128ctx *ctx = (aes128ctx *) schedule;
     aes128_ctr_upd_blks(out, out_blks, ctx);
 }
 
 void oqs_aes256_ecb_enc_sch_c(const uint8_t *plaintext, const size_t plaintext_len, const void *schedule, uint8_t *ciphertext) {
-    assert(plaintext_len % 16 == 0);
+    if (plaintext == NULL || schedule == NULL || ciphertext == NULL || plaintext_len % 16 != 0) {
+        return;
+    }
     const aes256ctx *ctx = (const aes256ctx *) schedule;
     aes_ecb(ciphertext, plaintext, plaintext_len / 16, ctx->sk_exp, 14);
 }
 
 void oqs_aes256_ctr_enc_sch_c(const uint8_t *iv, const size_t iv_len, const void *schedule, uint8_t *out, size_t out_len) {
+    if (iv == NULL || schedule == NULL || out == NULL) {
+        return;
+    }
     const aes256ctx *ctx = (const aes256ctx *) schedule;
     aes_ctr(out, out_len, iv, iv_len, ctx->sk_exp, 14);
 }
 
 void oqs_aes256_ctr_enc_sch_upd_blks_c(void *schedule, uint8_t *out, size_t out_blks) {
+    if (schedule == NULL || out == NULL) {
+        return;
+    }
     aes256ctx *ctx = (aes256ctx *) schedule;
     aes256_ctr_upd_blks(out, out_blks, ctx);
 }
diff --git a/src/common/aes/aes_impl.c b/src/common/aes/aes_impl.c
index 706a5f186..8c2aa2a63 100644
--- a/src/common/aes/aes_impl.c
+++ b/src/common/aes/aes_impl.c
@@ -39,6 +39,9 @@
 #endif
 
 static void AES128_ECB_load_schedule(const uint8_t *key, void **_schedule) {
+    if (key == NULL || _schedule == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes128_load_schedule_c(key, _schedule),
         oqs_aes128_load_schedule_ni(key, _schedule),
@@ -47,10 +50,16 @@ static void AES128_ECB_load_schedule(const uint8_t *key, void **_schedule) {
 }
 
 static void AES128_CTR_inc_init(const uint8_t *key, void **_schedule) {
+    if (key == NULL || _schedule == NULL) {
+        return;
+    }
     AES128_ECB_load_schedule(key, _schedule);
 }
 
 static void AES128_CTR_inc_iv(const uint8_t *iv, size_t iv_len, void *_schedule) {
+    if (iv == NULL || _schedule == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes128_load_iv_c(iv, iv_len, _schedule),
         oqs_aes128_load_iv_ni(iv, iv_len, _schedule),
@@ -59,6 +68,9 @@ static void AES128_CTR_inc_iv(const uint8_t *iv, size_t iv_len, void *_schedule)
 }
 
 static void AES128_CTR_inc_ivu64(uint64_t iv, void *_schedule) {
+    if (_schedule == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes128_load_iv_u64_c(iv, _schedule),
         oqs_aes128_load_iv_u64_ni(iv, _schedule),
@@ -67,6 +79,9 @@ static void AES128_CTR_inc_ivu64(uint64_t iv, void *_schedule) {
 }
 
 static void AES128_free_schedule(void *schedule) {
+    if (schedule == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes128_free_schedule_c(schedule),
         oqs_aes128_free_schedule_ni(schedule),
@@ -75,6 +90,9 @@ static void AES128_free_schedule(void *schedule) {
 }
 
 static void AES256_ECB_load_schedule(const uint8_t *key, void **_schedule) {
+    if (key == NULL || _schedule == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes256_load_schedule_c(key, _schedule),
         oqs_aes256_load_schedule_ni(key, _schedule),
@@ -83,10 +101,16 @@ static void AES256_ECB_load_schedule(const uint8_t *key, void **_schedule) {
 }
 
 static void AES256_CTR_inc_init(const uint8_t *key, void **_schedule) {
+    if (key == NULL || _schedule == NULL) {
+        return;
+    }
     AES256_ECB_load_schedule(key, _schedule);
 }
 
 static void AES256_CTR_inc_iv(const uint8_t *iv, size_t iv_len, void *_schedule) {
+    if (iv == NULL || _schedule == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes256_load_iv_c(iv, iv_len, _schedule),
         oqs_aes256_load_iv_ni(iv, iv_len, _schedule),
@@ -95,6 +119,9 @@ static void AES256_CTR_inc_iv(const uint8_t *iv, size_t iv_len, void *_schedule)
 }
 
 static void AES256_CTR_inc_ivu64(uint64_t iv, void *_schedule) {
+    if (_schedule == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes256_load_iv_u64_c(iv, _schedule),
         oqs_aes256_load_iv_u64_ni(iv, _schedule),
@@ -103,6 +130,9 @@ static void AES256_CTR_inc_ivu64(uint64_t iv, void *_schedule) {
 }
 
 static void AES256_free_schedule(void *schedule) {
+    if (schedule == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes256_free_schedule_c(schedule),
         oqs_aes256_free_schedule_ni(schedule),
@@ -113,13 +143,22 @@ static void AES256_free_schedule(void *schedule) {
 static void AES128_ECB_enc_sch(const uint8_t *plaintext, const size_t plaintext_len, const void *schedule, uint8_t *ciphertext);
 
 static void AES128_ECB_enc(const uint8_t *plaintext, const size_t plaintext_len, const uint8_t *key, uint8_t *ciphertext) {
+    if (plaintext == NULL || key == NULL || ciphertext == NULL) {
+        return;
+    }
     void *schedule = NULL;
     AES128_ECB_load_schedule(key, &schedule);
+    if (schedule == NULL) {
+        return;
+    }
     AES128_ECB_enc_sch(plaintext, plaintext_len, schedule, ciphertext);
     AES128_free_schedule(schedule);
 }
 
 static void AES128_ECB_enc_sch(const uint8_t *plaintext, const size_t plaintext_len, const void *schedule, uint8_t *ciphertext) {
+    if (plaintext == NULL || schedule == NULL || ciphertext == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes128_ecb_enc_sch_c(plaintext, plaintext_len, schedule, ciphertext),
         oqs_aes128_ecb_enc_sch_ni(plaintext, plaintext_len, schedule, ciphertext),
@@ -128,6 +167,9 @@ static void AES128_ECB_enc_sch(const uint8_t *plaintext, const size_t plaintext_
 }
 
 static void AES128_CTR_inc_stream_iv(const uint8_t *iv, const size_t iv_len, const void *schedule, uint8_t *out, size_t out_len) {
+    if (iv == NULL || schedule == NULL || out == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes128_ctr_enc_sch_c(iv, iv_len, schedule, out, out_len),
         oqs_aes128_ctr_enc_sch_ni(iv, iv_len, schedule, out, out_len),
@@ -138,13 +180,22 @@ static void AES128_CTR_inc_stream_iv(const uint8_t *iv, const size_t iv_len, con
 static void AES256_ECB_enc_sch(const uint8_t *plaintext, const size_t plaintext_len, const void *schedule, uint8_t *ciphertext);
 
 static void AES256_ECB_enc(const uint8_t *plaintext, const size_t plaintext_len, const uint8_t *key, uint8_t *ciphertext) {
+    if (plaintext == NULL || key == NULL || ciphertext == NULL) {
+        return;
+    }
     void *schedule = NULL;
     AES256_ECB_load_schedule(key, &schedule);
+    if (schedule == NULL) {
+        return;
+    }
     AES256_ECB_enc_sch(plaintext, plaintext_len, schedule, ciphertext);
     AES256_free_schedule(schedule);
 }
 
 static void AES256_ECB_enc_sch(const uint8_t *plaintext, const size_t plaintext_len, const void *schedule, uint8_t *ciphertext) {
+    if (plaintext == NULL || schedule == NULL || ciphertext == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes256_ecb_enc_sch_c(plaintext, plaintext_len, schedule, ciphertext),
         oqs_aes256_ecb_enc_sch_ni(plaintext, plaintext_len, schedule, ciphertext),
@@ -153,6 +204,9 @@ static void AES256_ECB_enc_sch(const uint8_t *plaintext, const size_t plaintext_
 }
 
 static void AES256_CTR_inc_stream_iv(const uint8_t *iv, const size_t iv_len, const void *schedule, uint8_t *out, size_t out_len) {
+    if (iv == NULL || schedule == NULL || out == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes256_ctr_enc_sch_c(iv, iv_len, schedule, out, out_len),
         oqs_aes256_ctr_enc_sch_ni(iv, iv_len, schedule, out, out_len),
@@ -161,6 +215,9 @@ static void AES256_CTR_inc_stream_iv(const uint8_t *iv, const size_t iv_len, con
 }
 
 static void AES256_CTR_inc_stream_blks(void *schedule, uint8_t *out, size_t out_blks) {
+    if (schedule == NULL || out == NULL) {
+        return;
+    }
     C_OR_NI_OR_ARM(
         oqs_aes256_ctr_enc_sch_upd_blks_c(schedule, out, out_blks),
         oqs_aes256_ctr_enc_sch_upd_blks_ni(schedule, out, out_blks),
diff --git a/src/common/aes/aes_ossl.c b/src/common/aes/aes_ossl.c
index 1e48124a3..96993739a 100644
--- a/src/common/aes/aes_ossl.c
+++ b/src/common/aes/aes_ossl.c
@@ -17,6 +17,9 @@ struct key_schedule {
 };
 
 static inline void br_enc64be(unsigned char *dst, uint64_t x) {
+    if (dst == NULL) {
+        return;
+    }
     dst[7] = (unsigned char)(x >> 56);
     dst[6] = (unsigned char)(x >> 48);
     dst[5] = (unsigned char)(x >> 40);
@@ -28,13 +31,25 @@ static inline void br_enc64be(unsigned char *dst, uint64_t x) {
 }
 
 static void AES128_ECB_load_schedule(const uint8_t *key, void **schedule) {
+    if (key == NULL || schedule == NULL) {
+        return;
+    }
     *schedule = OQS_MEM_malloc(sizeof(struct key_schedule));
-    OQS_EXIT_IF_NULLPTR(*schedule, "OpenSSL");
+    if (*schedule == NULL) {
+        OQS_EXIT_IF_NULLPTR(*schedule, "OpenSSL");
+    }
     struct key_schedule *ks = (struct key_schedule *) *schedule;
     ks->for_ECB = 1;
     ks->ctx = OSSL_FUNC(EVP_CIPHER_CTX_new)();
-    OQS_EXIT_IF_NULLPTR(ks->ctx, "OpenSSL");
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptInit_ex)(ks->ctx, oqs_aes_128_ecb(), NULL, key, NULL));
+    if (ks->ctx == NULL) {
+        OQS_MEM_secure_free(*schedule, sizeof(struct key_schedule));
+        OQS_EXIT_IF_NULLPTR(ks->ctx, "OpenSSL");
+    }
+    if (OSSL_FUNC(EVP_EncryptInit_ex)(ks->ctx, oqs_aes_128_ecb(), NULL, key, NULL) != 1) {
+        OSSL_FUNC(EVP_CIPHER_CTX_free)(ks->ctx);
+        OQS_MEM_secure_free(*schedule, sizeof(struct key_schedule));
+        OQS_EXIT("OpenSSL: EVP_EncryptInit_ex failed");
+    }
     OSSL_FUNC(EVP_CIPHER_CTX_set_padding)(ks->ctx, 0);
 }
 
@@ -50,25 +65,42 @@ static void AES128_free_schedule(void *schedule) {
 }
 
 static void AES128_ECB_enc(const uint8_t *plaintext, const size_t plaintext_len, const uint8_t *key, uint8_t *ciphertext) {
+    if (plaintext == NULL || key == NULL || ciphertext == NULL) {
+        return;
+    }
     void *schedule = NULL;
     OQS_AES128_ECB_load_schedule(key, &schedule);
-    OQS_AES128_ECB_enc_sch(plaintext, plaintext_len, schedule, ciphertext);
-    OQS_AES128_free_schedule(schedule);
+    if (schedule != NULL) {
+        OQS_AES128_ECB_enc_sch(plaintext, plaintext_len, schedule, ciphertext);
+        OQS_AES128_free_schedule(schedule);
+    }
 }
 
 static void AES128_ECB_enc_sch(const uint8_t *plaintext, const size_t plaintext_len, const void *schedule, uint8_t *ciphertext) {
+    if (plaintext == NULL || schedule == NULL || ciphertext == NULL) {
+        return;
+    }
     assert(plaintext_len % 16 == 0);
     int outlen;
     const struct key_schedule *ks = (const struct key_schedule *) schedule;
     SIZE_T_TO_INT_OR_EXIT(plaintext_len, plaintext_len_int)
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptUpdate)(ks->ctx, ciphertext, &outlen, plaintext, plaintext_len_int));
+    if (OSSL_FUNC(EVP_EncryptUpdate)(ks->ctx, ciphertext, &outlen, plaintext, plaintext_len_int) != 1) {
+        OQS_EXIT("OpenSSL: EVP_EncryptUpdate failed");
+    }
     assert(outlen == plaintext_len_int);
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptFinal_ex)(ks->ctx, ciphertext, &outlen));
+    if (OSSL_FUNC(EVP_EncryptFinal_ex)(ks->ctx, ciphertext, &outlen) != 1) {
+        OQS_EXIT("OpenSSL: EVP_EncryptFinal_ex failed");
+    }
 }
 
 static void AES128_CTR_inc_stream_iv(const uint8_t *iv, size_t iv_len, const void *schedule, uint8_t *out, size_t out_len) {
+    if (iv == NULL || schedule == NULL || out == NULL) {
+        return;
+    }
     EVP_CIPHER_CTX *ctr_ctx = OSSL_FUNC(EVP_CIPHER_CTX_new());
-    OQS_EXIT_IF_NULLPTR(ctr_ctx, "OpenSSL");
+    if (ctr_ctx == NULL) {
+        OQS_EXIT_IF_NULLPTR(ctr_ctx, "OpenSSL");
+    }
     uint8_t iv_ctr[16];
     if (iv_len == 12) {
         memcpy(iv_ctr, iv, 12);
@@ -79,26 +111,44 @@ static void AES128_CTR_inc_stream_iv(const uint8_t *iv, size_t iv_len, const voi
     } else if (iv_len == 16) {
         memcpy(iv_ctr, iv, 16);
     } else {
+        OSSL_FUNC(EVP_CIPHER_CTX_free)(ctr_ctx);
         return; /* TODO: better error handling */
     }
 
     const struct key_schedule *ks = (const struct key_schedule *) schedule;
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptInit_ex)(ctr_ctx, oqs_aes_128_ctr(), NULL, ks->key, iv_ctr));
+    if (OSSL_FUNC(EVP_EncryptInit_ex)(ctr_ctx, oqs_aes_128_ctr(), NULL, ks->key, iv_ctr) != 1) {
+        OSSL_FUNC(EVP_CIPHER_CTX_free)(ctr_ctx);
+        OQS_EXIT("OpenSSL: EVP_EncryptInit_ex failed");
+    }
     SIZE_T_TO_INT_OR_EXIT(out_len, out_len_input_int)
     memset(out, 0, (size_t)out_len_input_int);
     int out_len_output;
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptUpdate)(ctr_ctx, out, &out_len_output, out, out_len_input_int));
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptFinal_ex)(ctr_ctx, out + out_len_output, &out_len_output));
+    if (OSSL_FUNC(EVP_EncryptUpdate)(ctr_ctx, out, &out_len_output, out, out_len_input_int) != 1) {
+        OSSL_FUNC(EVP_CIPHER_CTX_free)(ctr_ctx);
+        OQS_EXIT("OpenSSL: EVP_EncryptUpdate failed");
+    }
+    if (OSSL_FUNC(EVP_EncryptFinal_ex)(ctr_ctx, out + out_len_output, &out_len_output) != 1) {
+        OSSL_FUNC(EVP_CIPHER_CTX_free)(ctr_ctx);
+        OQS_EXIT("OpenSSL: EVP_EncryptFinal_ex failed");
+    }
     OSSL_FUNC(EVP_CIPHER_CTX_free)(ctr_ctx);
 }
 
 static void AES128_CTR_inc_init(const uint8_t *key, void **schedule) {
+    if (key == NULL || schedule == NULL) {
+        return;
+    }
     *schedule = OQS_MEM_malloc(sizeof(struct key_schedule));
-    OQS_EXIT_IF_NULLPTR(*schedule, "OpenSSL");
+    if (*schedule == NULL) {
+        OQS_EXIT_IF_NULLPTR(*schedule, "OpenSSL");
+    }
     struct key_schedule *ks = (struct key_schedule *) *schedule;
     EVP_CIPHER_CTX *ctr_ctx = OSSL_FUNC(EVP_CIPHER_CTX_new)();
-    OQS_EXIT_IF_NULLPTR(ctr_ctx, "OpenSSL");
+    if (ctr_ctx == NULL) {
+        OQS_MEM_secure_free(*schedule, sizeof(struct key_schedule));
+        OQS_EXIT_IF_NULLPTR(ctr_ctx, "OpenSSL");
+    }
     ks->for_ECB = 0;
     ks->ctx = ctr_ctx;
 
@@ -106,7 +156,9 @@ static void AES128_CTR_inc_init(const uint8_t *key, void **schedule) {
 }
 
 static void AES128_CTR_inc_iv(const uint8_t *iv, size_t iv_len, void *schedule) {
-    OQS_EXIT_IF_NULLPTR(schedule, "OpenSSL");
+    if (iv == NULL || schedule == NULL) {
+        return;
+    }
     struct key_schedule *ks = (struct key_schedule *) schedule;
     if (iv_len == 12) {
         memcpy(ks->iv, iv, 12);
@@ -116,35 +168,61 @@ static void AES128_CTR_inc_iv(const uint8_t *iv, size_t iv_len, void *schedule)
     } else {
         return; /* TODO: better error handling */
     }
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptInit_ex)(ks->ctx, oqs_aes_128_ctr(), NULL, ks->key, ks->iv));
+    if (OSSL_FUNC(EVP_EncryptInit_ex)(ks->ctx, oqs_aes_128_ctr(), NULL, ks->key, ks->iv) != 1) {
+        OQS_EXIT("OpenSSL: EVP_EncryptInit_ex failed");
+    }
 }
 
 static void AES128_CTR_inc_ivu64(uint64_t iv, void *schedule) {
-    OQS_EXIT_IF_NULLPTR(schedule, "OpenSSL");
+    if (schedule == NULL) {
+        return;
+    }
     struct key_schedule *ks = (struct key_schedule *) schedule;
     br_enc64be(ks->iv, iv);
     memset(&ks->iv[8], 0, 8);
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptInit_ex)(ks->ctx, oqs_aes_128_ctr(), NULL, ks->key, ks->iv));
+    if (OSSL_FUNC(EVP_EncryptInit_ex)(ks->ctx, oqs_aes_128_ctr(), NULL, ks->key, ks->iv) != 1) {
+        OQS_EXIT("OpenSSL: EVP_EncryptInit_ex failed");
+    }
 }
 
 static void AES256_ECB_load_schedule(const uint8_t *key, void **schedule) {
+    if (key == NULL || schedule == NULL) {
+        return;
+    }
     *schedule = OQS_MEM_malloc(sizeof(struct key_schedule));
-    OQS_EXIT_IF_NULLPTR(*schedule, "OpenSSL");
+    if (*schedule == NULL) {
+        OQS_EXIT("OpenSSL: Failed to allocate memory");
+    }
     struct key_schedule *ks = (struct key_schedule *) *schedule;
     ks->for_ECB = 1;
     ks->ctx = OSSL_FUNC(EVP_CIPHER_CTX_new)();
-    OQS_EXIT_IF_NULLPTR(ks->ctx, "OpenSSL");
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptInit_ex)(ks->ctx, oqs_aes_256_ecb(), NULL, key, NULL));
+    if (ks->ctx == NULL) {
+        OQS_MEM_free(*schedule);
+        OQS_EXIT("OpenSSL: EVP_CIPHER_CTX_new failed");
+    }
+    if (OSSL_FUNC(EVP_EncryptInit_ex)(ks->ctx, oqs_aes_256_ecb(), NULL, key, NULL) != 1) {
+        OSSL_FUNC(EVP_CIPHER_CTX_free)(ks->ctx);
+        OQS_MEM_free(*schedule);
+        OQS_EXIT("OpenSSL: EVP_EncryptInit_ex failed");
+    }
     OSSL_FUNC(EVP_CIPHER_CTX_set_padding)(ks->ctx, 0);
 }
 
 static void AES256_CTR_inc_init(const uint8_t *key, void **schedule) {
+    if (key == NULL || schedule == NULL) {
+        return;
+    }
     *schedule = OQS_MEM_malloc(sizeof(struct key_schedule));
-    OQS_EXIT_IF_NULLPTR(*schedule, "OpenSSL");
+    if (*schedule == NULL) {
+        OQS_EXIT("OpenSSL: Failed to allocate memory");
+    }
     struct key_schedule *ks = (struct key_schedule *) *schedule;
     EVP_CIPHER_CTX *ctr_ctx = OSSL_FUNC(EVP_CIPHER_CTX_new)();
-    OQS_EXIT_IF_NULLPTR(ctr_ctx, "OpenSSL");
+    if (ctr_ctx == NULL) {
+        OQS_MEM_free(*schedule);
+        OQS_EXIT("OpenSSL: EVP_CIPHER_CTX_new failed");
+    }
     ks->for_ECB = 0;
     ks->ctx = ctr_ctx;
 
@@ -152,7 +230,9 @@ static void AES256_CTR_inc_init(const uint8_t *key, void **schedule) {
 }
 
 static void AES256_CTR_inc_iv(const uint8_t *iv, size_t iv_len, void *schedule) {
-    OQS_EXIT_IF_NULLPTR(schedule, "OpenSSL");
+    if (iv == NULL || schedule == NULL) {
+        return;
+    }
     struct key_schedule *ks = (struct key_schedule *) schedule;
     if (iv_len == 12) {
         memcpy(ks->iv, iv, 12);
@@ -162,15 +242,21 @@ static void AES256_CTR_inc_iv(const uint8_t *iv, size_t iv_len, void *schedule)
     } else {
         return; /* TODO: better error handling */
     }
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptInit_ex)(ks->ctx, oqs_aes_256_ctr(), NULL, ks->key, ks->iv));
+    if (OSSL_FUNC(EVP_EncryptInit_ex)(ks->ctx, oqs_aes_256_ctr(), NULL, ks->key, ks->iv) != 1) {
+        OQS_EXIT("OpenSSL: EVP_EncryptInit_ex failed");
+    }
 }
 
 static void AES256_CTR_inc_ivu64(uint64_t iv, void *schedule) {
-    OQS_EXIT_IF_NULLPTR(schedule, "OpenSSL");
+    if (schedule == NULL) {
+        return;
+    }
     struct key_schedule *ks = (struct key_schedule *) schedule;
     br_enc64be(ks->iv, iv);
     memset(&ks->iv[8], 0, 8);
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptInit_ex)(ks->ctx, oqs_aes_256_ctr(), NULL, ks->key, ks->iv));
+    if (OSSL_FUNC(EVP_EncryptInit_ex)(ks->ctx, oqs_aes_256_ctr(), NULL, ks->key, ks->iv) != 1) {
+        OQS_EXIT("OpenSSL: EVP_EncryptInit_ex failed");
+    }
 }
 
 static void AES256_free_schedule(void *schedule) {
@@ -179,18 +265,30 @@ static void AES256_free_schedule(void *schedule) {
 }
 
 static void AES256_ECB_enc(const uint8_t *plaintext, const size_t plaintext_len, const uint8_t *key, uint8_t *ciphertext) {
+    if (plaintext == NULL || key == NULL || ciphertext == NULL) {
+        return;
+    }
     void *schedule = NULL;
     OQS_AES256_ECB_load_schedule(key, &schedule);
+    if (schedule == NULL) {
+        return;
+    }
     OQS_AES256_ECB_enc_sch(plaintext, plaintext_len, schedule, ciphertext);
     OQS_AES256_free_schedule(schedule);
 }
 
 static void AES256_ECB_enc_sch(const uint8_t *plaintext, const size_t plaintext_len, const void *schedule, uint8_t *ciphertext) {
+    if (plaintext == NULL || schedule == NULL || ciphertext == NULL) {
+        return;
+    }
     // actually same code as AES 128
     OQS_AES128_ECB_enc_sch(plaintext, plaintext_len, schedule, ciphertext);
 }
 
 static void AES256_CTR_inc_stream_iv(const uint8_t *iv, size_t iv_len, const void *schedule, uint8_t *out, size_t out_len) {
+    if (iv == NULL || schedule == NULL || out == NULL) {
+        return;
+    }
     EVP_CIPHER_CTX *ctr_ctx = OSSL_FUNC(EVP_CIPHER_CTX_new)();
     OQS_EXIT_IF_NULLPTR(ctr_ctx, "OpenSSL");
     uint8_t iv_ctr[16];
@@ -203,20 +301,33 @@ static void AES256_CTR_inc_stream_iv(const uint8_t *iv, size_t iv_len, const voi
     } else if (iv_len == 16) {
         memcpy(iv_ctr, iv, 16);
     } else {
+        OSSL_FUNC(EVP_CIPHER_CTX_free)(ctr_ctx);
         return; /* TODO: better error handling */
     }
 
     const struct key_schedule *ks = (const struct key_schedule *) schedule;
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptInit_ex)(ctr_ctx, oqs_aes_256_ctr(), NULL, ks->key, iv_ctr));
+    if (OSSL_FUNC(EVP_EncryptInit_ex)(ctr_ctx, oqs_aes_256_ctr(), NULL, ks->key, iv_ctr) != 1) {
+        OSSL_FUNC(EVP_CIPHER_CTX_free)(ctr_ctx);
+        return;
+    }
     SIZE_T_TO_INT_OR_EXIT(out_len, out_len_input_int)
     memset(out, 0, (size_t)out_len_input_int);
     int out_len_output;
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptUpdate)(ctr_ctx, out, &out_len_output, out, out_len_input_int));
-    OQS_OPENSSL_GUARD(OSSL_FUNC(EVP_EncryptFinal_ex)(ctr_ctx, out + out_len_output, &out_len_output));
+    if (OSSL_FUNC(EVP_EncryptUpdate)(ctr_ctx, out, &out_len_output, out, out_len_input_int) != 1) {
+        OSSL_FUNC(EVP_CIPHER_CTX_free)(ctr_ctx);
+        return;
+    }
+    if (OSSL_FUNC(EVP_EncryptFinal_ex)(ctr_ctx, out + out_len_output, &out_len_output) != 1) {
+        OSSL_FUNC(EVP_CIPHER_CTX_free)(ctr_ctx);
+        return;
+    }
     OSSL_FUNC(EVP_CIPHER_CTX_free)(ctr_ctx);
 }
 
 static void AES256_CTR_inc_stream_blks(void *schedule, uint8_t *out, size_t out_blks) {
+    if (schedule == NULL || out == NULL) {
+        return;
+    }
     size_t out_len = out_blks * 16;
     struct key_schedule *ks = (struct key_schedule *) schedule;
     int out_len_output;
diff --git a/src/common/common.h b/src/common/common.h
index 947b65b03..d912d9370 100644
--- a/src/common/common.h
+++ b/src/common/common.h
@@ -81,6 +81,16 @@ extern "C" {
 #define OQS_MEM_strdup(str) strdup(str)
 #endif
 
+/**
+ * Prints an error message to stderr and returns.
+ * @param msg The error message to be printed.
+ */
+#define OQS_EXIT(msg) \
+    { \
+        fprintf(stderr, "%s", msg); \
+        return; \
+    }
 /**
  * Macro for terminating the program if x is
  * a null pointer.