diff --git a/src/ecmult_gen_impl.h b/src/ecmult_gen_impl.h index b218c6ec33..8d52107294 100644 --- a/src/ecmult_gen_impl.h +++ b/src/ecmult_gen_impl.h @@ -152,10 +152,11 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25 #if COMB_BITS > 256 } else if (EXPECT(bit_pos >= 256, 0)) { /* Some bit(s) of (mask(block) << comb_off) are outside of [0,256). This means - * we are also done constructing bits, but know its top bit is zero, and no + * we are also done constructing bits, but know its top bit should be zero, and no * flipping/negating is needed. The table lookup can also be done over a * smaller number of entries. */ - VERIFY_CHECK(bits < (1U << tooth)); + /* Mask out junk in bits variable. */ + bits &= ((1U << tooth) - 1); VERIFY_CHECK(bits < COMB_POINTS); for (index = 0; (index >> tooth) == 0; ++index) { secp256k1_ge_storage_cmov(&adds, &secp256k1_ecmult_gen_prec_table[block][index], index == bits); @@ -164,10 +165,16 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25 break; #endif } else { - /* Gather another bit. */ - uint32_t bit = secp256k1_scalar_get_bits(&recoded, bit_pos, 1); + /* Gather another bit. To reduce side-channels from single-bit reads, don't + * actually fetch a single bit, but read higher bits too, which are XOR'ed + * into the upper bits of bits. On every iteration, one additional bit is + * made correct, starting at the bottom. The bits above that contain junk. + * See https://www.usenix.org/system/files/conference/usenixsecurity18/sec18-alam.pdf + */ + uint32_t bitdata = secp256k1_scalar_get_bits(&recoded, bit_pos & ~0x1f, 32) >> (bit_pos & 0x1f); VERIFY_CHECK(bit_pos < COMB_BITS && bit_pos < 256); - bits |= bit << tooth; + bits &= ~(1 << tooth); + bits ^= bitdata << tooth; bit_pos += COMB_SPACING; ++tooth; }